Python source code examples: torch.randint_like()
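torch.randint_like(input, low=0, high) returns a tensor with the same shape, dtype, layout, and device as input, filled with integers drawn uniformly from [low, high). Because the dtype is inherited from the template tensor, calling it on a float tensor yields float-valued integers (as in Example 8 below). A minimal sketch of the call patterns used throughout these examples (names here are illustrative):

import torch

x = torch.zeros(2, 3)                 # float32 template
a = torch.randint_like(x, 2)          # values in {0., 1.}, same shape/dtype as x
b = torch.randint_like(x, -100, 100)  # values in [-100, 100)

# A recurring idiom (see Examples 1 and 4): remap {0, 1} to Rademacher {-1, +1}
a[a == 0] = -1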

Example 1
import torch

def Hash_center_multilables(labels, Hash_center):
    # labels.shape: [batch_size, num_class]; Hash_center.shape: [num_class, hash_bits]
    is_start = True
    for label in labels:
        one_labels = (label == 1).nonzero()  # find the position of 1 in label
        # In the nus_wide dataset some images' labels are all zero; those images are
        # ignored here, but could instead be given an all-zero hash center:
        #if len(one_labels) == 0:
        #    Center_mean = torch.zeros((1, Hash_center.size(1)))
        #else:
        one_labels = one_labels.squeeze(1)
        Center_mean = torch.mean(Hash_center[one_labels], dim=0)
        Center_mean[Center_mean<0] = -1
        Center_mean[Center_mean>0] = 1
        random_center = torch.randint_like(Hash_center[0], 2)  # random binary vector {0, 1}, same shape as one hash center
        random_center[random_center == 0] = -1  # map the random binary vector to {-1, 1}
        Center_mean[Center_mean == 0] = random_center[Center_mean == 0]  # break ties randomly; shape: [hash_bits]
        Center_mean = Center_mean.view(1, -1) # shape:[1,hash_bit]

        if is_start:  # the first time
            hash_center = Center_mean
            is_start = False
        else:
            hash_center = torch.cat((hash_center, Center_mean), 0)
            #hash_center = torch.stack((hash_center, Center_mean), dim=0)  # note: stack would add a new dim, not concatenate

    return hash_center 
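A hedged usage sketch with dummy shapes (4 samples, 5 classes, 8-bit centers); the random ±1 centers are purely illustrative:

import torch

num_class, hash_bits = 5, 8
Hash_center = torch.randint_like(torch.empty(num_class, hash_bits), 2) * 2 - 1  # random ±1 centers
labels = torch.tensor([[1., 0., 1., 0., 0.],
                       [0., 1., 0., 0., 0.],
                       [1., 1., 1., 0., 0.],
                       [0., 0., 0., 1., 1.]])
centers = Hash_center_multilables(labels, Hash_center)
print(centers.shape)  # torch.Size([4, 8])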
Example 2
import torch

def word_dropout_raw(x, l, unk_drop_prob, rand_drop_prob, vocab):
    if not unk_drop_prob and not rand_drop_prob:
        return x

    assert unk_drop_prob + rand_drop_prob <= 1

    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    token_mask = pos_idx < l.unsqueeze(1)  # True for real tokens, False for padding beyond each length in l

    x2 = x.clone()
    
    # drop to <unk> token
    if unk_drop_prob:
        unk_idx = vocab.stoi['<unk>']
        unk_drop_mask = (noise < unk_drop_prob) & token_mask
        x2.masked_fill_(unk_drop_mask, unk_idx)

    # drop to a random token
    if rand_drop_prob:
        rand_drop_mask = (noise > 1 - rand_drop_prob) & token_mask
        rand_tokens = torch.randint_like(x, len(vocab))
        rand_tokens.masked_fill_(~rand_drop_mask, 0)  # keep random tokens only at dropped positions
        x2.masked_fill_(rand_drop_mask, 0)
        x2 = x2 + rand_tokens
    
    return x2 
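A hedged usage sketch; the toy vocab mimics the torchtext-style stoi/__len__ interface the function assumes:

import torch

class ToyVocab:
    stoi = {'<pad>': 0, '<unk>': 1, 'a': 2, 'b': 3, 'c': 4}
    def __len__(self):
        return len(self.stoi)

x = torch.tensor([[2, 3, 4, 0], [3, 2, 0, 0]])  # two padded sequences
l = torch.tensor([3, 2])                        # true lengths
out = word_dropout_raw(x, l, unk_drop_prob=0.2, rand_drop_prob=0.2, vocab=ToyVocab())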
Example 3
def rand_dropout_(x, l, drop_prob, vocab_size):
    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    token_mask = pos_idx < l.unsqueeze(1)
    rand_drop_mask = (noise < drop_prob) & token_mask
    rand_tokens = torch.randint_like(x, vocab_size)
    rand_tokens.masked_fill_(~rand_drop_mask, 0)  # keep random tokens only at dropped positions
    x.masked_fill_(rand_drop_mask, 0)
    x += rand_tokens  # in-place update, matching the trailing-underscore naming convention
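Example 3 is the in-place counterpart of the random-replacement branch in Example 2; a quick hedged check:

import torch

x = torch.tensor([[2, 3, 4, 0]])
rand_dropout_(x, torch.tensor([3]), drop_prob=0.5, vocab_size=5)
print(x)  # some of the first three tokens may now be random ids in [0, 5)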
Example 4
def trace(self, maxIter=100, tol=1e-3):
        """
        compute the trace of hessian using Hutchinson's method
        maxIter: maximum iterations used to compute trace
        tol: the relative tolerance
        """

        device = self.device
        trace_vhv = []
        trace = 0.

        for i in range(maxIter):
            self.model.zero_grad()
            # generate Rademacher random variables: sample {0, 1}, then map 0 -> -1
            v = [
                torch.randint_like(p, high=2, device=device)
                for p in self.params
            ]
            for v_i in v:
                v_i[v_i == 0] = -1

            if self.full_dataset:
                _, Hv = self.dataloader_hv_product(v)
            else:
                Hv = hessian_vector_product(self.gradsH, self.params, v)
            trace_vhv.append(group_product(Hv, v).cpu().item())
            if abs(np.mean(trace_vhv) - trace) / (trace + 1e-6) < tol:
                return trace_vhv
            else:
                trace = np.mean(trace_vhv)

        return trace_vhv 
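Hutchinson's estimator relies on tr(A) = E[v^T A v] for random v with zero mean and identity covariance, e.g. Rademacher ±1 entries. A hedged, self-contained sketch on an explicit symmetric matrix instead of a Hessian:

import torch

torch.manual_seed(0)
A = torch.randn(50, 50)
A = A + A.t()                                 # symmetric, like a Hessian
estimates = []
for _ in range(1000):
    v = torch.randint_like(A[:, 0], high=2)   # {0., 1.}
    v[v == 0] = -1                            # Rademacher {-1., +1.}
    estimates.append(v @ A @ v)               # one sample of v^T A v
print(torch.stack(estimates).mean().item(), torch.trace(A).item())  # agree up to Monte-Carlo error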
Example 5
import random
import torch

def get_neg_batch(head, tail, entity_num):
    # Corrupt either the heads or the tails (chosen at random) by adding a random
    # offset modulo entity_num; an offset of 0 leaves the entity unchanged.
    neg_head = head.clone()
    neg_tail = tail.clone()
    if random.random() > 0.5:
        offset_tensor = torch.randint_like(neg_head, entity_num)
        neg_head = (neg_head + offset_tensor) % entity_num
    else:
        offset_tensor = torch.randint_like(neg_tail, entity_num)
        neg_tail = (neg_tail + offset_tensor) % entity_num
    return neg_head, neg_tail 
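A hedged usage sketch for negative sampling over, say, 10 entities:

import torch

head = torch.tensor([0, 1, 2])
tail = torch.tensor([3, 4, 5])
neg_head, neg_tail = get_neg_batch(head, tail, entity_num=10)
# one of the two sides is perturbed elementwise, modulo 10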
Example 6
def __getitem__(self, i):
        dp = self.data[i]
        # integer noise in [-100, 100), scaled below to additive noise in [-1.00, 0.99]
        r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0
        return dp + r * 0.01 
Example 7
def forward(self, actions, batch_info=None):
        if batch_info is None:
            # Just take final value if there is no batch info
            epsilon = self.epsilon_schedule.value(1.0)
        else:
            epsilon = self.epsilon_schedule.value(batch_info['progress'])

        # epsilon-greedy exploration: with probability epsilon, replace an action with a uniform random one
        random_samples = torch.randint_like(actions, self.action_space.n)
        selector = torch.rand_like(random_samples, dtype=torch.float32)

        # Actions with noise applied
        noisy_actions = torch.where(selector > epsilon, actions, random_samples)

        return noisy_actions 
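The same idiom in isolation (hedged sketch; names and values illustrative): each action is kept with probability 1 - epsilon and otherwise replaced by a uniformly random one:

import torch

actions = torch.tensor([0, 2, 1, 3])
epsilon, n_actions = 0.1, 4
random_samples = torch.randint_like(actions, n_actions)
selector = torch.rand_like(random_samples, dtype=torch.float32)
noisy_actions = torch.where(selector > epsilon, actions, random_samples)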
Example 8
def test_zinb_distribution():
    theta = 100.0 + torch.rand(size=(2,))
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)  # float-valued counts, since dtype is inherited from mu
    log_p_ref = log_zinb_positive(x, mu, theta, pi)

    dist = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    log_p_zinb = dist.log_prob(x)
    assert (log_p_ref - log_p_zinb).abs().max().item() <= 1e-8

    torch.manual_seed(0)
    s1 = dist.sample((100,))
    assert s1.shape == (100, 2)
    s2 = dist.sample(sample_shape=(4, 3))
    assert s2.shape == (4, 3, 2)

    log_p_ref = log_nb_positive(x, mu, theta)
    dist = NegativeBinomial(mu=mu, theta=theta)
    log_p_nb = dist.log_prob(x)
    assert (log_p_ref - log_p_nb).abs().max().item() <= 1e-8

    s1 = dist.sample((1000,))
    assert s1.shape == (1000, 2)
    assert (s1.mean(0) - mu).abs().mean() <= 1e0
    assert (s1.std(0) - (mu + mu * mu / theta) ** 0.5).abs().mean() <= 1e0

    size = (50, 3)
    theta = 100.0 + torch.rand(size=size)
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    dist1 = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    dist2 = NegativeBinomial(mu=mu, theta=theta)
    assert dist1.log_prob(x).shape == size
    assert dist2.log_prob(x).shape == size

    with pytest.raises(ValueError):
        ZeroInflatedNegativeBinomial(mu=-mu, theta=theta, zi_logits=pi)
    with pytest.warns(UserWarning):
        dist1.log_prob(-x)  # negative values should trigger a warning
    with pytest.warns(UserWarning):
        dist2.log_prob(0.5 * x)  # non-integer values should trigger a warning