Python source code examples: torch.dist()
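The examples below illustrate torch.dist(input, other, p=2), which broadcasts its two arguments, subtracts them, and returns the p-norm of the difference as a single 0-dim tensor. A minimal sketch of that behavior (assuming a reasonably recent PyTorch release):

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.0, 0.0, 0.0])

d2 = torch.dist(a, b)         # Euclidean (p=2) distance, a 0-dim tensor
d1 = torch.dist(a, b, p=1)    # L1 distance

print(d2)                                      # tensor(3.6056), i.e. sqrt(2**2 + 3**2)
print(torch.allclose(d2, torch.norm(a - b)))   # True: dist(a, b, p) == norm(a - b, p)
print(d1)                                      # tensor(5.)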

Example 1
def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:
    """
    Get the n closest
    words to your word.
    """

    # Calculate distances to all other words

    word_embedding = embeddings[word_to_idx[target_word.lower()]]
    distances = []
    for word, index in word_to_idx.items():
        if word == "<MASK>" or word == target_word:
            continue
        distances.append((word, torch.dist(word_embedding, embeddings[index])))

    results = sorted(distances, key=lambda x: x[1])[:n]  # target word is already excluded above, so take the first n
    return results 
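A self-contained usage sketch (the vocabulary and embedding values below are made up purely for illustration, and the typing imports used in the signature are assumed to be in scope):

import torch

word_to_idx = {"<MASK>": 0, "cat": 1, "dog": 2, "car": 3}
embeddings = torch.tensor([[0.0, 0.0],
                           [1.0, 1.0],
                           [1.1, 0.9],
                           [5.0, -3.0]])

# "dog" is nearest to "cat" in this toy space, followed by "car"
print(get_closest("cat", word_to_idx, embeddings, n=2))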
Example 2
def calc_dists(preds, target, normalize, use_zero=False):
    preds = preds.float()
    target = target.float()
    normalize = normalize.float()
    dists = torch.zeros(preds.size(1), preds.size(0))
    if use_zero:
        boundary = 0
    else:
        boundary = 1
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n,c,0] > boundary and target[n, c, 1] > boundary:
                dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
            else:
                dists[c, n] = -1
    return dists 
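The nested Python loops above can be replaced by tensor operations. A vectorized sketch of the same computation (assuming preds and target have shape [N, C, 2] and normalize has shape [N]):

import torch

def calc_dists_vectorized(preds, target, normalize, use_zero=False):
    preds, target, normalize = preds.float(), target.float(), normalize.float()
    boundary = 0 if use_zero else 1
    # Euclidean distance per (sample, keypoint), normalized per sample
    d = torch.norm(preds - target, dim=2) / normalize.unsqueeze(1)      # [N, C]
    valid = (target[..., 0] > boundary) & (target[..., 1] > boundary)   # [N, C]
    d = torch.where(valid, d, torch.full_like(d, -1.0))
    return d.t()                                                        # [C, N], matching calc_dists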
Example 3
def batch_euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [N, m, d]
        y: pytorch Variable, with shape [N, n, d]
    Returns:
        dist: pytorch Variable, with shape [N, m, n]
    """
    assert len(x.size()) == 3
    assert len(y.size()) == 3
    assert x.size(0) == y.size(0)
    assert x.size(-1) == y.size(-1)

    N, m, d = x.size()
    N, n, d = y.size()

    # shape [N, m, n]
    xx = torch.pow(x, 2).sum(-1, keepdim=True).expand(N, m, n)
    yy = torch.pow(y, 2).sum(-1, keepdim=True).expand(N, n, m).permute(0, 2, 1)
    dist = xx + yy
    dist.baddbmm_(x, y.permute(0, 2, 1), beta=1, alpha=-2)  # dist <- dist - 2 * x @ y^T (keyword form; the old positional signature is deprecated)
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist 
Example 4
def get_obs(Asymm, H, Sx, Sy, Sz, C, E ):
    # A(phy,u,l,d,r), C(d,r), E(u,r,d)
    
    Da = Asymm.size()
    Td = torch.einsum('mefgh,nabcd->eafbgchdmn',(Asymm,Asymm)).contiguous().view(Da[1]**2, Da[2]**2, Da[3]**2, Da[4]**2, Da[0], Da[0])
    #print( torch.dist( Td, Td.permute(0,3,2,1,4,5) ) )    # test left-right reflection symmetry of Td

    CE = torch.tensordot(C,E,([1],[0]))         # C(1d)E(dga)->CE(1ga)
    EL = torch.tensordot(E,CE,([2],[0]))        # E(2e1)CE(1ga)->EL(2ega)  use E(2e1) == E(1e2) 
    EL = torch.tensordot(EL,Td,([1,2],[1,0]))   # EL(2ega)T(gehbmn)->EL(2ahbmn)
    EL = torch.tensordot(EL,CE,([0,2],[0,1]))   # EL(2ahbmn)CE(2hc)->EL(abmnc), use CE(2hc) == CE(1ga) 
    Rho = torch.tensordot(EL,EL,([0,1,4],[0,1,4])).permute(0,2,1,3).contiguous().view(Da[0]**2,Da[0]**2)
    
    # print( (Rho-Rho.t()).norm() )
    Rho = 0.5*(Rho + Rho.t())
    
    Tnorm = Rho.trace()
    Energy = torch.mm(Rho,H).trace()/Tnorm
    Mx = torch.mm(Rho,Sx).trace()/Tnorm
    My = torch.mm(Rho,Sy).trace()/Tnorm
    Mz = torch.mm(Rho,Sz).trace()/Tnorm
   
    #print("Tnorm = %g, Energy = %g " % (Tnorm.item(), Energy.item()) )

    return Energy, Mx, My, Mz 
Example 5
def test(model):
    model.eval()
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1,299,299,3)
    inputs[0] = torch.from_numpy(img)
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)
    # 1, 3, 299, 299
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example 6
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Example 7
def similarity(self, code_vec, desc_vec):
        """
        https://arxiv.org/pdf/1508.01585.pdf 
        """
        assert self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd'], "invalid similarity measure"
        if self.conf['sim_measure']=='cos':
            return F.cosine_similarity(code_vec, desc_vec)
        elif self.conf['sim_measure']=='poly':
            return (0.5*torch.matmul(code_vec, desc_vec.t()).diag()+1)**2
        elif self.conf['sim_measure']=='sigmoid':
            return torch.tanh(torch.matmul(code_vec, desc_vec.t()).diag()+1)
        elif self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']:
            euc_dist = torch.dist(code_vec, desc_vec, 2) # or torch.norm(code_vec-desc_vec,2)
            euc_sim = 1 / (1 + euc_dist)
            if self.conf['sim_measure']=='euc': return euc_sim                
            sigmoid_sim = torch.sigmoid(torch.matmul(code_vec, desc_vec.t()).diag()+1)
            if self.conf['sim_measure']=='gesd': 
                return euc_sim * sigmoid_sim
            elif self.conf['sim_measure']=='aesd':
                return 0.5*(euc_sim+sigmoid_sim) 
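One subtlety worth flagging: torch.dist reduces over every element of its inputs and returns a single scalar, so euc_dist above is one number for the whole batch rather than one value per (code, description) pair. A per-row variant would look like the sketch below (an illustration, not the repository's code, assuming code_vec and desc_vec both have shape [batch, dim]):

import torch

def rowwise_euc_sim(code_vec, desc_vec):
    # one Euclidean distance per row, shape [batch]
    euc_dist = torch.norm(code_vec - desc_vec, p=2, dim=1)
    return 1 / (1 + euc_dist)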
Example 8
def inverse_distance(h, h_i, epsilon=1e-3):
  return 1 / (torch.dist(h, h_i) + epsilon) 
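A minimal usage sketch (the query and memory keys below are hypothetical): normalizing inverse distances into weights, the way a DND-style memory lookup combines its neighbors:

import torch

query = torch.randn(32)
keys = [torch.randn(32) for _ in range(5)]                       # hypothetical memory keys
weights = torch.stack([inverse_distance(query, k) for k in keys])
weights = weights / weights.sum()                                # kernel weights that sum to 1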
Example 9
def update(self):
    """
    Iterate through the transition queue and make NEC updates
    """
    for t in range(len(self.transition_queue)):
      transition = self.transition_queue[t]
      state = Variable(Tensor(transition.state)).unsqueeze(0)
      action = transition.action
      state_embedding = self.embedding_network(move_to_gpu(state))
      dnd = self.dnd_list[action]

      Q_N = move_to_gpu(self.Q_lookahead(t))
      embedding_index = dnd.get_index(state_embedding)
      if embedding_index is None:
        dnd.insert(state_embedding.detach(), Q_N.detach().unsqueeze(0))
      else:
        Q = self.Q_update(dnd.values[embedding_index], Q_N)
        dnd.update(Q.detach(), embedding_index)
      self.replay_memory.push(transition.state, action,
                              move_to_gpu(Q_N.detach()))

    [dnd.commit_insert() for dnd in self.dnd_list]

    for t in range(len(self.transition_queue)):
      if t % self.update_period == 0 or t == len(self.transition_queue) - 1:
        # Train on random mini-batch from self.replay_memory
        batch = self.replay_memory.sample(self.batch_size)
        actual = torch.cat([sample.Q_N for sample in batch])
        predicted = torch.cat([self.dnd_list[sample.action].lookup(self.embedding_network(move_to_gpu(
            Variable(Tensor(sample.state))).unsqueeze(0)), update_flag=True) for sample in batch])
        loss = torch.dist(actual, move_to_gpu(predicted))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        [dnd.update_params() for dnd in self.dnd_list]

    # Clear out transition queue
    self.transition_queue = [] 
Example 10
def approx_PCKh(pred, target, idxs, res):
    # pred: b x n x 2 tensor
    # target: b x n x 2 tensor
    assert(pred.size()==target.size())
    target = target.float()
    # distances between prediction and groundtruth coordinates
    dists = torch.zeros((pred.size(1), pred.size(0)))
    normalize = res/10
    for i in range(pred.size(1)):
        for j in range(pred.size(0)):
            if target[j][i][0] > 0 and target[j][i][1] > 0:
                dists[i][j] = torch.dist(target[j][i], pred[j][i]) / normalize
            else:
                dists[i][j] = -1
    # accuracies based on the distances
    threshold = 0.5
    avg_acc = 0
    bad_idx_count = 0
    for i in range(len(idxs)):
        per_joint_dists = dists[idxs[i]]
        if torch.ne(per_joint_dists, -1).sum() > 0:
            valid_count = per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum()
            all_count = per_joint_dists.ne(-1).sum()
            # print(valid_count)
            # print(type(valid_count))
            # exit()
            per_joint_acc = float(valid_count) / float(all_count)
            # print(per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum())
            # print('joint {0} accuracy is {1}' .format(idxs[i]+1, per_joint_acc))
        else:
            per_joint_acc = -1
        if per_joint_acc >= 0:
            avg_acc += per_joint_acc
        else:
            bad_idx_count += 1
    avg_acc = avg_acc / (len(idxs)-bad_idx_count)
    # exit()
    return avg_acc 
Example 11
def forward(self, re_img, gt_img):
        p = 2
        if self.dist_metric == 'L1':
            p = 1
        b,c,h,w = gt_img.size()
        loss = torch.dist(re_img, gt_img, p=p) / (b*h*w)
        return loss 
Example 12
def forward(self, predict, gt):
        predict = predict.view(predict.size()[0], -1)
        batch, dim = predict.size()
        loss = 0.0
        for i in range(batch):
            for j in range(i, batch):
                if gt[i] == gt[j]:
                    label = 1
                else:
                    label = 0
                dist = torch.dist(predict[i], predict[j], p=2) ** 2 / dim
                loss += label * dist + (1 - label) * F.relu(self.margin - dist)
        loss = 2 * loss / (batch * (batch - 1))
        return loss 
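The O(batch²) Python loop can also be written with pairwise tensor operations. A vectorized sketch of the same loss (assuming predict is [batch, ...], gt is a 1-D label tensor, and margin stands in for self.margin):

import torch
import torch.nn.functional as F

def contrastive_loss_vectorized(predict, gt, margin):
    predict = predict.view(predict.size(0), -1)
    batch, dim = predict.size()
    d = torch.cdist(predict, predict, p=2) ** 2 / dim        # squared, dim-normalized distances
    same = (gt.unsqueeze(0) == gt.unsqueeze(1)).float()      # 1 where labels match
    pair_loss = same * d + (1 - same) * F.relu(margin - d)
    upper = torch.triu(torch.ones_like(pair_loss))           # count each unordered pair once
    return 2 * (pair_loss * upper).sum() / (batch * (batch - 1))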
Example 13
def euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [m, d]
        y: pytorch Variable, with shape [n, d]
    Returns:
        dist: pytorch Variable, with shape [m, n]
    """
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # dist <- dist - 2 * x @ y^T (keyword form; the old positional signature is deprecated)
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist 
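For reference, recent PyTorch versions expose torch.cdist, which produces the same [m, n] matrix directly. A quick sanity-check sketch:

import torch

x = torch.randn(6, 16)
y = torch.randn(9, 16)
print(torch.allclose(euclidean_dist(x, y), torch.cdist(x, y, p=2), atol=1e-4))  # expected: True, up to float32 noise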
Example 14
def peste_distance(self) -> np.ndarray:
        """Calculates the euclidean distance between pixels of two different arrays
        on a vector of observations, and normalizes the result applying the relativize function.
        In a more general scenario, any function that quantifies the notion of "how different two
        observations are" could work, even if it is not a proper distance.
        """
        # Get random companion
        peste_obs = self.get_peste_obs()
        # Euclidean distance between states (pixels / RAM)
        # obs = self.observations.astype(np.float32).reshape((self.n_walkers, -1))
        dist = self.wasserstein_distance(np.array(self.observations), peste_obs)
        return relativize_vector(dist) 
Example 15
def _one_distance(ws1, ws2):
        dist = 0
        for w1, w2 in zip(ws1, ws2):
            dist += np.sqrt(np.sum((w1 - w2) ** 2))
        return dist 
Example 16
def evaluate_distance(self) -> np.ndarray:
        """Calculates the euclidean distance between pixels of two different arrays
        on a vector of observations, and normalizes the result applying the relativize function.
        In a more general scenario, any function that quantifies the notion of "how different two
        observations are" could work, even if it is not a proper distance.
        """

        # Get random companion
        idx = np.random.permutation(np.arange(self.n_walkers, dtype=int))
        # Euclidean distance between states (pixels / RAM)
        obs = self.observations.astype(np.float32)
        dist = self.wasserstein_distance(obs[idx], obs)  # ** 2
        return relativize_vector(dist) 
Example 17
def distance_from_seeds(self, obs, idx):
        dists = np.zeros(len(obs))
        for i in range(len(obs)):
            params1 = self.walk_from_seeds(obs[i], self.param_clone(self.param_state))
            params2 = self.walk_from_seeds(obs[idx[i]], self.param_clone(self.param_state))
            d = 0
            for j in range(len(params1)):
                d += torch.dist(params1[j], params2[j])
            dists[i] = d.cpu() / len(params1)
        return dists 
Example 18
def calc_dists(preds, target, normalize):
    preds = preds.float()
    target = target.float()
    dists = torch.zeros(preds.size(1), preds.size(0))
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n,c,0] > 1 and target[n, c, 1] > 1:
                dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Example 19
def calc_dists(preds, labels, normalize):
    dists = torch.Tensor(preds.size(1), preds.size(0))
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            if labels[i, j, 0] == 0 and labels[i, j, 1] == 0:
                dists[j, i] = -1
            else:
                dists[j, i] = torch.dist(labels[i, j, :], preds[i, j, :]) / normalize
    return dists 
Example 20
def calculate_pckh_distance(pred, target, head_length):
    return torch.dist(target, pred) / head_length 
Example 21
def pdist2_slow(X, Z=None):
    if Z is None: Z = X
    D = torch.zeros(X.size(0), X.size(2), Z.size(2))

    for b in range(D.size(0)):
        for i in range(D.size(1)):
            for j in range(D.size(2)):
                D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j])
    return D 
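The triple loop above runs in Python and is slow for large inputs. A vectorized sketch producing the same [B, m, n] result (assuming torch.cdist is available and distances are taken between columns, as in pdist2_slow):

import torch

def pdist2_fast(X, Z=None):
    # X: [B, d, m], Z: [B, d, n]; move the feature dim last and let cdist do the work
    if Z is None:
        Z = X
    return torch.cdist(X.transpose(1, 2), Z.transpose(1, 2), p=2)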
Example 22
def approx_PCKh_per(pred, target, idxs, res):
    # pred: b x n x 2 tensor
    # target: b x n x 2 tensor
    assert(pred.size()==target.size())
    target = target.float()
    # distances between prediction and groundtruth coordinates
    dists = torch.zeros((pred.size(1), pred.size(0)))
    normalize = res/10
    for i in range(pred.size(1)):
        for j in range(pred.size(0)):
            if target[j][i][0] > 0 and target[j][i][1] > 0:
                dists[i][j] = torch.dist(target[j][i], pred[j][i]) / normalize
            else:
                dists[i][j] = -1
    # accuracies based on the distances
    threshold = 0.5
    avg_acc = 0
    pckhs = torch.zeros(len(idxs))
    bad_idx_count = 0
    for i in range(len(idxs)):
        per_joint_dists = dists[idxs[i]]
        if torch.ne(per_joint_dists, -1).sum() > 0:
            valid_count = per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum()
            all_count = per_joint_dists.ne(-1).sum()
            # print(valid_count)
            # print(type(valid_count))
            # exit()
            pckhs[i] = float(valid_count) / float(all_count)
            # print(per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum())
            # print('joint {0} accuracy is {1}' .format(idxs[i]+1, per_joint_acc))
        else:
            pckhs[i] = -1
        if pckhs[i] >= 0:
            avg_acc += pckhs[i]
        else:
            bad_idx_count += 1
    avg_acc = avg_acc / (len(idxs)-bad_idx_count)
    # exit()
    return avg_acc, pckhs 
Example 23
def approx_PCKh_samples(pred, target, res):
    # pred: b x n x 2 tensor
    # target: b x n x 2 tensor
    assert(pred.size()==target.size())
    sample_num = pred.size(0)
    target = target.float()
    # distances between prediction and groundtruth coordinates
    dists = torch.zeros((pred.size(1), pred.size(0)))
    normalize = res/10
    for i in range(pred.size(1)):
        for j in range(pred.size(0)):
            if target[j][i][0] > 0 and target[j][i][1] > 0:
                dists[i][j] = torch.dist(target[j][i], pred[j][i]) / normalize
            else:
                dists[i][j] = -1
    # accuracies based on the distances
    threshold = 0.5
    # accuracies = torch.zeros(sample_num)
    counts = torch.zeros(sample_num)
    bad_idx_count = 0
    for i in range(0, sample_num):
        # per_person_dists = dists[idxs, i]
        per_person_dists = dists[:, i]
        if torch.ne(per_person_dists, -1).sum() > 0:
            valid_count = per_person_dists.le(threshold).eq(per_person_dists.ne(-1)).sum()
            # all_count = per_person_dists.ne(-1).sum()
            # print(valid_count)
            # print(type(valid_count))
            # exit()
            # accuracies[i] = float(valid_count) / float(all_count)
            counts[i] = valid_count
            # print(per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum())
            # print('joint {0} accuracy is {1}' .format(idxs[i]+1, per_joint_acc))
        else:
            # accuracies[i] = 0
            counts[i] = 0
    # exit()
    # return accuracies
    return counts