Python source examples: torch.sparse_coo_tensor()
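Before the examples, a minimal sketch of the call they all build on: torch.sparse_coo_tensor(indices, values, size) expects integer indices of shape (ndim, nnz), matching values of shape (nnz,), and an optional dense shape (inferred from the largest index per dimension when omitted).

import torch

# Two nonzeros in a 2x3 matrix: (0, 2) -> 3.0 and (1, 0) -> 4.0
i = torch.tensor([[0, 1],   # row coordinates
                  [2, 0]])  # column coordinates
v = torch.tensor([3., 4.])
s = torch.sparse_coo_tensor(i, v, (2, 3))
print(s.to_dense())
# tensor([[0., 0., 3.],
#         [4., 0., 0.]])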

Example 1
def _compute_laplacian(self):
    """Precomputes the graph Laplacian."""
    self._recompute_laplacian = False
    # One (row, col) pair per edge, plus a (node, node) self-loop for each node.
    indices = [
      (node, edge)
      for node, edges in enumerate(self.adjacency)
      for edge in edges + [node]
    ]
    values = torch.zeros(len(indices))
    for idx, index in enumerate(indices):
      values[idx] = self._laplacian_element(*index)
    indices = torch.tensor(indices, dtype=torch.long).t()  # (2, nnz) integer indices
    self._laplacian = torch.sparse_coo_tensor(
      indices, values,
      (len(self.adjacency), len(self.adjacency))
    ) 
Example 2
def _generate_adj(self, sample1, sample2):
        edgelist = []
        mapping = {}  # node id in sample1 -> row index
        for i in range(len(sample1)):
            mapping[sample1[i]] = i

        # Collect (row, col) pairs for neighbours of sample2 nodes that appear in sample1.
        for i in range(len(sample2)):
            nodes = self.adj[sample2[i]]
            for node in nodes:
                if node in mapping:
                    edgelist.append([mapping[node], i])
        edgetensor = torch.LongTensor(edgelist)
        valuetensor = torch.ones(edgetensor.shape[0]).float()
        t = torch.sparse_coo_tensor(
            edgetensor.t(), valuetensor, (len(sample1), len(sample2))
        )
        return t 
Example 3
def make_batch_align_matrix(index_tensor, size=None, normalize=False):
    """
    Convert a sparse index_tensor into a batch of alignment matrix,
    with row normalize to the sum of 1 if set normalize.

    Args:
        index_tensor (LongTensor): ``(N, 3)`` of [batch_id, tgt_id, src_id]
        size (List[int]): Size of the sparse tensor.
        normalize (bool): if normalize the 2nd dim of resulting tensor.
    """
    n_fill, device = index_tensor.size(0), index_tensor.device
    value_tensor = torch.ones([n_fill], dtype=torch.float)
    dense_tensor = torch.sparse_coo_tensor(
        index_tensor.t(), value_tensor, size=size, device=device).to_dense()
    if normalize:
        row_sum = dense_tensor.sum(-1, keepdim=True)  # sum by row(tgt)
        # threshold on 1 to avoid div by 0
        torch.nn.functional.threshold(row_sum, 1, 1, inplace=True)
        dense_tensor.div_(row_sum)
    return dense_tensor 
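A minimal usage sketch for the function above (the alignment triples here are hypothetical; assumes import torch):

# (batch_id, tgt_id, src_id) triples for B=2, tgt_len=2, src_len=3
index_tensor = torch.tensor([[0, 0, 1], [0, 1, 2], [1, 0, 0]])
align = make_batch_align_matrix(index_tensor, size=[2, 2, 3], normalize=True)
# each align[b, t] with alignments now sums to 1; all-zero rows stay zero
# because the threshold clamps their row sum to 1 before division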
Example 4
def build_adj(edge_index, num_nodes):
    """
    Build a symmetric adjacency matrix with self-loops for an undirected graph.
    :param edge_index: LongTensor of shape (2, num_edges)
    :param num_nodes: number of nodes; inferred from edge_index if None
    :return: binary sparse adjacency matrix of shape (num_nodes, num_nodes)
    """
    if num_nodes is None:
        num_nodes = int(edge_index.max()) + 1  # consider both endpoints
    edge_attr = torch.ones(edge_index.size(1), dtype=torch.float)
    size = torch.Size([num_nodes, num_nodes])
    adj = torch.sparse_coo_tensor(edge_index, edge_attr, size)
    eye = torch.arange(start=0, end=num_nodes)
    eye = torch.stack([eye, eye])
    eye = torch.sparse_coo_tensor(eye, torch.ones([num_nodes]), size)
    adj = adj.t() + adj + eye  # entries may exceed 1 if edge_index is already symmetric

    adj = adj.to_dense().gt(0).to_sparse().type(torch.float)  # binarize back to {0, 1}

    return adj 
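For instance, with a hypothetical 4-node path graph whose edges are listed in one direction only:

edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])
adj = build_adj(edge_index, num_nodes=4)
# adj.to_dense() is the symmetric 4x4 path adjacency with ones on the diagonal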
Example 5
def __call__(self, data):
        assert data.edge_index is not None

        orig_num_nodes = data.num_nodes
        if self.num_nodes is None:
            num_nodes = orig_num_nodes
        else:
            assert orig_num_nodes <= self.num_nodes
            num_nodes = self.num_nodes

        if data.edge_attr is None:
            edge_attr = torch.ones(data.edge_index.size(1), dtype=torch.float)
        else:
            edge_attr = data.edge_attr

        size = torch.Size([num_nodes, num_nodes] + list(edge_attr.size())[1:])
        adj = torch.sparse_coo_tensor(data.edge_index, edge_attr, size)
        data.adj = adj.to_dense()
        data.edge_index = None
        data.edge_attr = None

        data.mask = torch.zeros(num_nodes, dtype=torch.bool)
        data.mask[:orig_num_nodes] = 1

        if data.x is not None:
            size = [num_nodes - data.x.size(0)] + list(data.x.size())[1:]
            data.x = torch.cat([data.x, data.x.new_zeros(size)], dim=0)

        if data.pos is not None:
            size = [num_nodes - data.pos.size(0)] + list(data.pos.size())[1:]
            data.pos = torch.cat([data.pos, data.pos.new_zeros(size)], dim=0)

        if data.y is not None and (data.y.size(0) == orig_num_nodes):
            size = [num_nodes - data.y.size(0)] + list(data.y.size())[1:]
            data.y = torch.cat([data.y, data.y.new_zeros(size)], dim=0)

        return data 
Example 6
def test_gcn_conv_with_sparse_input_feature():
    x = torch.sparse_coo_tensor(indices=torch.tensor([[0, 0], [0, 1]]),
                                values=torch.tensor([1., 1.]),
                                size=torch.Size([4, 16]))
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

    conv = GCNConv(16, 32)
    assert conv(x, edge_index).size() == (4, 32) 
Example 7
def sparse_matrix(data, index, shape, force_format=False):
    fmt = index[0]
    if fmt != 'coo':
        raise TypeError('PyTorch backend only supports the COO format, but got %s.' % fmt)
    spmat = th.sparse_coo_tensor(index[1], data, shape)
    return spmat, None 
Example 8
def neighbourhood_to_adjacency(neighbourhood):
  size = torch.Size([len(neighbourhood), len(neighbourhood)])
  indices = []
  for idx, nodes in neighbourhood:  # (node index, neighbour list) pairs
    for node in nodes:
      indices.append((idx, node))  # tuples, so set() below can deduplicate
      indices.append((node, idx))  # symmetrize
  indices = torch.tensor(sorted(set(indices)), dtype=torch.long).t()  # (2, nnz)
  values = torch.ones(indices.size(1))
  return torch.sparse_coo_tensor(indices, values, size)
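Called with a hypothetical neighbourhood given as (node, neighbour list) pairs:

nbhd = [(0, [1, 2]), (1, [2]), (2, [])]
adj = neighbourhood_to_adjacency(nbhd)  # symmetric 3x3 sparse adjacency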
Example 9
def _compute_adjacency_matrix(self):
    """Computes the graph adjacency matrix."""
    self._recompute_adjacency_matrix = False
    indices = torch.tensor([
      (node, edge)
      for node, edges in enumerate(self.adjacency)
      for edge in edges + [node]   # include a self-loop for every node
    ], dtype=torch.long).t()
    values = torch.ones(indices.size(1))
    self._adjacency_matrix = torch.sparse_coo_tensor(
      indices, values,
      (len(self.adjacency), len(self.adjacency))
    ) 
Example 10
def forward(ctx, indices, values, shape, b):
        assert not indices.requires_grad  # gradients flow to values, not indices
        a = torch.sparse_coo_tensor(indices, values, shape)
        ctx.save_for_backward(a, b)
        ctx.N = shape[0]
        return torch.matmul(a, b) 
Example 11
def build_fixation_maps(Ns, Ys, Xs, batch_size, height, width, dtype=torch.float32):
    indices = torch.stack((Ns, Ys, Xs), dim=0)  # (3, num_fixations)
    src = torch.ones(indices.shape[1], dtype=dtype, device=indices.device)
    fixation_maps = torch.sparse_coo_tensor(indices, src, size=(batch_size, height, width)).to_dense()

    return fixation_maps 
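A small worked call (made-up fixation coordinates). Note that to_dense() sums duplicate (N, Y, X) triples, so repeated fixations accumulate:

Ns = torch.tensor([0, 0, 1])
Ys = torch.tensor([1, 1, 0])
Xs = torch.tensor([2, 2, 3])
maps = build_fixation_maps(Ns, Ys, Xs, batch_size=2, height=3, width=4)
# maps[0, 1, 2] == 2.0 (duplicated triple); maps[1, 0, 3] == 1.0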
Example 12
def test_sparse_tensor(self, to_tensor, device):
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip()

        inp = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
        expected = torch.sparse_coo_tensor(size=(5, 3)).to(device)

        result = to_tensor(inp, device=device, accept_sparse=True)
        assert self.tensors_equal(result, expected) 
Example 13
def index2matrix(index):
    assert index.size(0) == 2

    index = index.long()
    v_len = index.size(1)
    v = torch.ones(v_len).float()
    matrix = torch.sparse_coo_tensor(index, v).to_dense()
    return matrix 
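No size is passed here, so the dense shape is inferred from the largest index in each coordinate row; a quick sketch:

index = torch.tensor([[0, 1], [2, 0]])
m = index2matrix(index)  # shape (2, 3), i.e. (max_row + 1, max_col + 1)
# m == [[0., 0., 1.], [1., 0., 0.]]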
Example 14
def sparse_repeat(sparse, *repeat_sizes):
    """
    """
    if len(repeat_sizes) == 1 and isinstance(repeat_sizes, tuple):
        repeat_sizes = repeat_sizes[0]

    if len(repeat_sizes) > len(sparse.shape):
        num_new_dims = len(repeat_sizes) - len(sparse.shape)
        new_indices = sparse._indices()
        new_indices = torch.cat(
            [
                torch.zeros(num_new_dims, new_indices.size(1), dtype=new_indices.dtype, device=new_indices.device),
                new_indices,
            ],
            0,
        )
        sparse = torch.sparse_coo_tensor(
            new_indices,
            sparse._values(),
            torch.Size((*[1 for _ in range(num_new_dims)], *sparse.shape)),
            dtype=sparse.dtype,
            device=sparse.device,
        )

    for i, repeat_size in enumerate(repeat_sizes):
        if repeat_size > 1:
            new_indices = sparse._indices().repeat(1, repeat_size)
            adding_factor = torch.arange(0, repeat_size, dtype=new_indices.dtype, device=new_indices.device).unsqueeze_(
                1
            )
            # Offset the k-th copy by k whole blocks along dimension i.
            new_indices[i].view(repeat_size, -1).add_(adding_factor * sparse.size(i))
            sparse = torch.sparse_coo_tensor(
                new_indices,
                sparse._values().repeat(repeat_size),
                torch.Size((*sparse.shape[:i], repeat_size * sparse.size(i), *sparse.shape[i + 1 :])),
                dtype=sparse.dtype,
                device=sparse.device,
            )

    return sparse 
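Usage follows Tensor.repeat semantics; a sketch against the block offset above:

s = torch.sparse_coo_tensor(torch.tensor([[0], [1]]), torch.tensor([1.]), (2, 2))
r = sparse_repeat(s, 3, 1)    # (6, 2): the 2x2 block tiled 3 times along dim 0
t = sparse_repeat(s, (2, 2))  # a single tuple also works -> (4, 4)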
Example 15
def from_adjlist(self, adj):
        """Transfer adj-list format to sparsetensor"""
        u_sampled, index = torch.unique(torch.flatten(adj), return_inverse=True)

        row = (torch.range(0, index.shape[0]-1) / adj.shape[1]).long().to(adj.device)
        col = index
        values = torch.ones(index.shape[0]).float().to(adj.device)
        indices = torch.cat([row.unsqueeze(1), col.unsqueeze(1)], axis=1).t()
        dense_shape = (adj.shape[0], u_sampled.shape[0])

        support = torch.sparse_coo_tensor(indices, values, dense_shape)

        return support, u_sampled.long() 
Example 16
def forward(self, input, edge_index):
        adj = torch.sparse_coo_tensor(
            edge_index,
            torch.ones(edge_index.shape[1], device=edge_index.device),  # one weight per edge
            (input.shape[0], input.shape[0]),
        ).to(input.device)
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example 17
def forward(self, x, edge_index):
        for i in range(self.num_layers):
            edge_index_sp = self.sampler(edge_index, self.sample_size[i]).to(x.device)
            adj_sp = torch.sparse_coo_tensor(
                edge_index_sp,
                torch.ones(edge_index_sp.shape[1], device=x.device),
                (x.shape[0], x.shape[0]),
            ).to(x.device)
            x = self.convs[i](x, adj_sp)
            if i != self.num_layers - 1:
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return F.log_softmax(x, dim=1) 
Example 18
def forward(self, x, edge_index, edge_weight=None):
        edge_index, _ = remove_self_loops(edge_index)
        edge_weight = torch.ones(edge_index.shape[1], device=edge_index.device) if edge_weight is None else edge_weight
        adj = torch.sparse_coo_tensor(edge_index, edge_weight, (x.shape[0], x.shape[0]))
        adj = adj.to(x.device)
        out = (1 + self.eps) * x + torch.spmm(adj, x)
        if self.apply_func is not None:
            out = self.apply_func(out)
        return out 
Example 19
def forward(self, x, edge_index, batch, edge_weight=None):
        embed = self.embd_gnn(x, edge_index)
        pooled = F.softmax(self.pool_gnn(x, edge_index), dim=-1)
        device = x.device
        masked_tensor = []
        value_set, value_counts = torch.unique(batch, return_counts=True)
        batch_size = len(value_set)
        for i in value_counts:
            masked = torch.ones((i, int(pooled.size()[1]/batch_size)))
            masked_tensor.append(masked)
        masked = torch.FloatTensor(block_diag(*masked_tensor)).to(device)

        result = torch.nn.functional.softmax(masked * pooled, dim=-1)
        result = result * masked
        result = result / (result.sum(dim=-1, keepdim=True) + 1e-13)
        # result = masked_softmax(pooled, masked, memory_efficient=False)

        h = torch.matmul(result.t(), embed)
        if edge_weight is None:  # truth-testing a multi-element tensor raises
            edge_weight = torch.ones(edge_index.shape[1]).to(x.device)
        adj = torch.sparse_coo_tensor(edge_index, edge_weight)
        adj_new = torch.sparse.mm(adj, result)
        adj_new = torch.mm(result.t(), adj_new)

        if self.use_link_pred:
            adj_loss = torch.norm((adj.to_dense() - torch.mm(result, result.t()))) / np.power((len(batch)), 2)
            self.loss_dict["adj_loss"] = adj_loss
        entropy_loss = (torch.distributions.Categorical(probs=pooled).entropy()).mean()
        assert not torch.isnan(entropy_loss)
        self.loss_dict["entropy_loss"] = entropy_loss
        return adj_new, h 
Example 20
def forward(self, x, edge_index):
        adj = torch.sparse_coo_tensor(
            edge_index,
            torch.ones(edge_index.shape[1]).float(),
            (x.shape[0], x.shape[0]),
            device=x.device
        )
        output_list = []
        for p, linear in zip(self.adj_pows, self.linears):
            output = linear(self.adj_pow_x(x, adj, p))
            output_list.append(output)
        
        return torch.cat(output_list, dim=1) 
Example 21
def to_torch_sparse(index, value, m, n):
    return torch.sparse_coo_tensor(index.detach(), value, (m, n)) 
Example 22
def to_torch_sparse_coo_tensor(
            self, dtype: Optional[int] = None) -> torch.Tensor:
        row, col, value = self.coo()
        index = torch.stack([row, col], dim=0)

        if value is None:
            value = torch.ones(self.nnz(), dtype=dtype, device=self.device())

        return torch.sparse_coo_tensor(index, value, self.sizes())


Example 23
def forward(self, input, adj):

        alp = self.alpha(adj[1]).t()[0]
        A = torch.sparse_coo_tensor(adj[0], alp, torch.Size([adj[2], adj[2]]), requires_grad=True)
        A = A + A.transpose(0, 1)
        support = torch.mm(input, self.weight)
        output = torch.sparse.mm(A, support)

        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Example 24
def target_prefixing(self, log_probs):
        """Fix the first part of predictions with `self.target_prefix`.

        Args:
            log_probs (FloatTensor): logits of size ``(B, vocab_size)``.

        Returns:
            log_probs (FloatTensor): modified logits in ``(B, vocab_size)``.
        """
        _B, vocab_size = log_probs.size()
        step = len(self)
        if (self.target_prefix is not None and
                step <= self.target_prefix.size(1)):
            pick_idx = self.target_prefix[:, step - 1].tolist()  # (B)
            pick_coo = [[path_i, pick] for path_i, pick in enumerate(pick_idx)
                        if pick not in [self.eos, self.pad]]
            mask_pathid = [path_i for path_i, pick in enumerate(pick_idx)
                           if pick in [self.eos, self.pad]]
            if len(pick_coo) > 0:
                pick_coo = torch.tensor(pick_coo).to(self.target_prefix)
                pick_fill_value = torch.ones(
                    [pick_coo.size(0)], dtype=log_probs.dtype)
                # pickups: tensor where the specified indices are set to 1, others 0
                pickups = torch.sparse_coo_tensor(
                    pick_coo.t(), pick_fill_value,
                    size=log_probs.size(), device=log_probs.device).to_dense()
                # dropdowns: opposite of pickups, 1 for indices that must not be picked
                dropdowns = torch.ones_like(pickups) - pickups
                if len(mask_pathid) > 0:
                    path_mask = torch.zeros(_B).to(self.target_prefix)
                    path_mask[mask_pathid] = 1
                    path_mask = path_mask.unsqueeze(1).to(dtype=bool)
                    dropdowns = dropdowns.masked_fill(path_mask, 0)
                # Subtract dropdowns from log_probs, driving probabilities of
                # unspecified indices close to 0
                log_probs -= 10000*dropdowns
        return log_probs 
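The masking trick in isolation, as a minimal sketch (batch size, vocabulary size, and picks are hypothetical):

pick_coo = torch.tensor([[0, 5], [1, 2]])  # path 0 must emit token 5, path 1 token 2
pickups = torch.sparse_coo_tensor(pick_coo.t(), torch.ones(2), size=(2, 8)).to_dense()
log_probs = torch.zeros(2, 8) - 10000 * (1 - pickups)  # everything but the picks is suppressed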
Example 25
def __init__(self, model_path='./model.pkl', sparse=True):
        super().__init__()

        self.parent = None
        self.model_path = None
        if model_path is not None:
            with open(model_path, 'rb') as f:
                self.model_path = model_path
                params = pickle.load(f)
                # These four can be registered directly as buffers:
                registerbuffer = lambda name: self.register_buffer(name,
                                                                   torch.as_tensor(params[name]))
                registerbuffer('weights')
                registerbuffer('posedirs')
                registerbuffer('v_template')
                registerbuffer('shapedirs')

                # Now for the more difficult...:
                # We have to convert f from uint32 to int32. (This is the indexbuffer)
                self.register_buffer('f', torch.as_tensor(params['f'].astype(np.int32)))
                self.register_buffer('kintree_table', torch.as_tensor(params['kintree_table'].astype(np.int32)))

                # J_regressor is a sparse tensor. This is (experimentally) supported in PyTorch.
                J_regressor = params['J_regressor']
                if scipy.sparse.issparse(J_regressor):
                    # If tensor is sparse (Which it is with SMPL/SMIL)
                    J_regressor = J_regressor.tocoo()
                    J_regressor = torch.sparse_coo_tensor([J_regressor.row, J_regressor.col],
                                                          J_regressor.data,
                                                          J_regressor.shape)
                    if not sparse:
                        J_regressor = J_regressor.to_dense()
                else:
                    J_regressor = torch.as_tensor(J_regressor)
                self.register_buffer('J_regressor', J_regressor)

                self.register_buffer('e4', self.posedirs.new_tensor([0, 0, 0, 1]))  # Cache this. (Saves a lot of time)
                self.register_buffer('eye', torch.eye(3, dtype=self.e4.dtype, device=self.e4.device))  # And this.
                self.set_parent()

        # Make sure the tree map is reconstructed if/when model is loaded.
        self._register_state_dict_hook(self.set_parent) 
Example 26
def to_tensor(X, device, accept_sparse=False):
    """Turn input data to torch tensor.

    Parameters
    ----------
    X : input data
      Handles the cases:
        * PackedSequence
        * numpy array
        * torch Tensor
        * scipy sparse CSR matrix
        * list or tuple of one of the former
        * dict with values of one of the former

    device : str, torch.device
      The compute device to be used. If set to 'cuda', data in torch
      tensors will be pushed to cuda tensors before being sent to the
      module.

    accept_sparse : bool (default=False)
      Whether to accept scipy sparse matrices as input. If False,
      passing a sparse matrix raises an error. If True, it is
      converted to a torch COO tensor.

    Returns
    -------
    output : torch Tensor

    """
    to_tensor_ = partial(to_tensor, device=device)

    if is_torch_data_type(X):
        return to_device(X, device)
    if isinstance(X, dict):
        return {key: to_tensor_(val) for key, val in X.items()}
    if isinstance(X, (list, tuple)):
        return [to_tensor_(x) for x in X]
    if np.isscalar(X):
        return torch.as_tensor(X, device=device)
    if isinstance(X, Sequence):
        return torch.as_tensor(np.array(X), device=device)
    if isinstance(X, np.ndarray):
        return torch.as_tensor(X, device=device)
    if sparse.issparse(X):
        if accept_sparse:
            return torch.sparse_coo_tensor(
                X.nonzero(), X.data, size=X.shape).to(device)
        raise TypeError("Sparse matrices are not supported. Set "
                        "accept_sparse=True to allow sparse matrices.")

    raise TypeError("Cannot convert this data type to a torch tensor.") 
Example 27
def bdsmm(sparse, dense):
    """
    Batch dense-sparse matrix multiply
    """
    # Make the batch sparse matrix into a block-diagonal matrix
    if sparse.ndimension() > 2:
        # Expand the tensors to account for broadcasting
        output_shape = _matmul_broadcast_shape(sparse.shape, dense.shape)
        expanded_sparse_shape = output_shape[:-2] + sparse.shape[-2:]
        unsqueezed_sparse_shape = [1 for _ in range(len(output_shape) - sparse.dim())] + list(sparse.shape)
        repeat_sizes = tuple(
            output_size // sparse_size
            for output_size, sparse_size in zip(expanded_sparse_shape, unsqueezed_sparse_shape)
        )
        sparse = sparse_repeat(sparse, *repeat_sizes)
        dense = dense.expand(*output_shape[:-2], dense.size(-2), dense.size(-1))

        # Figure out how much need to be added to the row/column indices to create
        # a block-diagonal matrix
        *batch_shape, num_rows, num_cols = sparse.shape
        batch_size = torch.Size(batch_shape).numel()
        batch_multiplication_factor = torch.tensor(
            [torch.Size(batch_shape[i + 1 :]).numel() for i in range(len(batch_shape))],
            dtype=torch.long,
            device=sparse.device,
        )
        if batch_multiplication_factor.is_cuda:
            batch_assignment = (sparse._indices()[:-2].float().t() @ batch_multiplication_factor.float()).long()
        else:
            batch_assignment = sparse._indices()[:-2].t() @ batch_multiplication_factor

        # Create block-diagonal sparse tensor
        indices = sparse._indices()[-2:].clone()
        indices[0].add_(batch_assignment, alpha=num_rows)
        indices[1].add_(batch_assignment, alpha=num_cols)
        sparse_2d = torch.sparse_coo_tensor(
            indices,
            sparse._values(),
            torch.Size((batch_size * num_rows, batch_size * num_cols)),
            dtype=sparse._values().dtype,
            device=sparse._values().device,
        )

        dense_2d = dense.reshape(batch_size * num_cols, -1)
        res = torch.dsmm(sparse_2d, dense_2d)
        res = res.view(*batch_shape, num_rows, -1)
        return res

    elif dense.dim() > 2:
        *batch_shape, num_rows, num_cols = dense.size()
        batch_size = torch.Size(batch_shape).numel()
        dense = dense.view(batch_size, num_rows, num_cols)
        res = torch.dsmm(sparse, dense.transpose(0, 1).reshape(-1, batch_size * num_cols))
        res = res.view(-1, batch_size, num_cols)
        res = res.transpose(0, 1).reshape(*batch_shape, -1, num_cols)
        return res

    else:
        return torch.dsmm(sparse, dense) 
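A 2d smoke test; the batched path additionally needs sparse_repeat and _matmul_broadcast_shape from the same module, and torch.dsmm is the legacy sparse-dense matmul this code targets:

s = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]), torch.tensor([1., 2.]), (2, 2))
d = torch.randn(2, 3)
out = bdsmm(s, d)  # 2d inputs fall through to torch.dsmm(sparse, dense); shape (2, 3)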
Example 28
def sparse_getitem(sparse, idxs):
    """
    """
    if not isinstance(idxs, tuple):
        idxs = (idxs,)

    if not sparse.ndimension() <= 2:
        raise RuntimeError("Must be a 1d or 2d sparse tensor")

    if len(idxs) > sparse.ndimension():
        raise RuntimeError("Invalid index for %d-order tensor" % sparse.ndimension())

    indices = sparse._indices()
    values = sparse._values()
    size = list(sparse.size())

    for i, idx in list(enumerate(idxs))[::-1]:
        if isinstance(idx, int):
            del size[i]
            mask = indices[i].eq(idx)
            if torch.any(mask):
                new_indices = torch.zeros(
                    indices.size(0) - 1, torch.sum(mask), dtype=indices.dtype, device=indices.device
                )
                for j in range(indices.size(0)):
                    if i > j:
                        new_indices[j].copy_(indices[j][mask])
                    elif i < j:
                        new_indices[j - 1].copy_(indices[j][mask])
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0) - 1, 1).zero_()
                values.resize_(1).zero_()

            if not len(size):
                return sum(values)

        elif isinstance(idx, slice):
            start, stop, step = idx.indices(size[i])
            size = list(size[:i]) + [stop - start] + list(size[i + 1 :])
            if step != 1:
                raise RuntimeError("Slicing with step is not supported")
            mask = indices[i].lt(stop) & indices[i].ge(start)
            if torch.any(mask):
                new_indices = torch.zeros(indices.size(0), torch.sum(mask), dtype=indices.dtype, device=indices.device)
                for j in range(indices.size(0)):
                    new_indices[j].copy_(indices[j][mask])
                new_indices[i].sub_(start)
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0), 1).zero_()
                values.resize_(1).zero_()

        else:
            raise RuntimeError("Unknown index type")

    return torch.sparse_coo_tensor(indices, values, torch.Size(size), dtype=values.dtype, device=values.device)
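For example, on a 2x2 sparse matrix (integer indexing drops a dimension, slicing keeps it):

s = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]), torch.tensor([1., 2.]), (2, 2))
row0 = sparse_getitem(s, 0)                            # 1d sparse, dense view [0., 1.]
block = sparse_getitem(s, (slice(0, 2), slice(0, 1)))  # 2x1 sparse column block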