Example No. 1
def seq_data_iter_random(corpus, batch_size, num_steps):
    # Offset the iterator over the data for uniform starts
    corpus = corpus[random.randint(0, num_steps):]
    # Subtract 1 since we need to account for the label
    num_examples = ((len(corpus) - 1) // num_steps)
    example_indices = list(range(0, num_examples * num_steps, num_steps))
    random.shuffle(example_indices)

    def data(pos):
        # This returns a sequence of the length num_steps starting from pos
        return corpus[pos:pos + num_steps]

    # Discard the last batch if it would be incomplete
    num_batches = num_examples // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # `batch_size` indicates the number of random examples read each time
        batch_indices = example_indices[i:(i + batch_size)]
        X = [data(j) for j in batch_indices]
        Y = [data(j + 1) for j in batch_indices]
        yield torch.tensor(X), torch.tensor(Y)
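
A minimal usage sketch (not part of the scraped example; `my_seq` is a hypothetical toy corpus of token indices, and `random` and `torch` are assumed to be imported):

import random
import torch

my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    # Y is X shifted one position to the right, so each X[i] predicts Y[i]
    print(X, '\n', Y)
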
Example No. 2
def seq_data_iter_random(corpus, batch_size, num_steps):
    """使用随机抽样生成一小批子序列。"""
    # 从随机偏移量(包括`num_steps - 1`)开始对序列进行分区
    corpus = corpus[random.randint(0, num_steps - 1):]
    # 减去1,因为我们需要考虑标签
    num_subseqs = (len(corpus) - 1) // num_steps
    # 长度为`num_steps`的子序列的起始索引
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    # 在随机抽样中,迭代过程中两个相邻随机小批量的子序列不一定在原始序列上相邻
    random.shuffle(initial_indices)

    def data(pos):
        # Return a sequence of length `num_steps` starting from `pos`
        return corpus[pos:pos + num_steps]

    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Here, `initial_indices` contains the random starting indices of subsequences
        initial_indices_per_batch = initial_indices[i:i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield d2l.tensor(X), d2l.tensor(Y)
Example No. 3
def _get_words_dict(self, data, max_words):
    word_counter = Counter(w.lower_ for d in self.nlp.tokenizer.pipe((
        doc for doc in tqdm(data(), desc="Tokenizing data"))) for w in d)
    dict_w = {
        w: i
        for i, (w, _) in tqdm(enumerate(
            word_counter.most_common(max_words), start=2),
                              desc="building word dict",
                              total=max_words)
    }
    dict_w["_padding_"] = 0
    dict_w["_unk_word_"] = 1
    print("Dictionary has {} words".format(len(dict_w)))
    return dict_w
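
A minimal standalone sketch of the same idea (an assumption, not taken from the source repository): build a word-to-index dictionary with collections.Counter from already tokenized sentences, reserving index 0 for padding and index 1 for unknown words, just as _get_words_dict does.

from collections import Counter

def build_word_dict(sentences, max_words):
    # Count lowercased tokens across all sentences
    word_counter = Counter(w.lower() for s in sentences for w in s)
    # The most frequent words get indices starting at 2
    dict_w = {w: i for i, (w, _) in enumerate(word_counter.most_common(max_words), start=2)}
    dict_w["_padding_"] = 0
    dict_w["_unk_word_"] = 1
    return dict_w

print(build_word_dict([["The", "cat"], ["the", "dog"]], max_words=10))
# e.g. {'the': 2, 'cat': 3, 'dog': 4, '_padding_': 0, '_unk_word_': 1}
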
Example No. 4
def seq_data_iter_random(corpus, batch_size, num_steps):  #@save
    # Start with a random offset to partition a sequence
    corpus = corpus[random.randint(0, num_steps):]
    # Subtract 1 since we need to account for labels
    num_subseqs = (len(corpus) - 1) // num_steps
    # The starting indices for subsequences of length `num_steps`
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    # In random sampling, the subsequences from two adjacent random
    # minibatches during iteration are not necessarily adjacent on the
    # original sequence
    random.shuffle(initial_indices)

    def data(pos):
        # Return a sequence of length `num_steps` starting from `pos`
        return corpus[pos: pos + num_steps]

    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Here, `initial_indices` contains randomized starting indices for
        # subsequences
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield d2l.tensor(X), d2l.tensor(Y)
Example No. 5
def dataset(num_trials: int, n_time: int, Inp_dim: int, tau: int = 1000,
            latents: Tuple[Tuple[str]] = (('x',), ('y',), ('theta',)), num_peaks: Optional[Tuple[int]] = None,
            peak_width_factors: Optional[Union[Tuple[Tuple[float]], float]] = 0.1, sigma_mult: float = 0.,
            sigma_add: float = 0., freeze_epochs: bool = True, train_perc: float = 0.8, return_latents: bool = False):
    if not freeze_epochs:
        raise NotImplementedError("freeze_epochs=False option isn't implemented yet")
    else:
        # class LatentVariableReceptiveFieldEncoding(torch.utils.data.Dataset):
        #     def __init__(self):
        #         pass

        inputs, targets, latent_vals = data(num_trials, n_time, Inp_dim, tau, latents, num_peaks, peak_width_factors,
                                            sigma_mult, sigma_add)

        train_time = int(round(train_perc * n_time))

        train_inputs = inputs[:, :train_time]
        train_inputs = train_inputs.reshape(-1, train_inputs.shape[-1])
        val_inputs = inputs[:, train_time:]
        val_inputs = val_inputs.reshape(-1, val_inputs.shape[-1])

        train_targets = targets[:, :train_time]
        train_targets = train_targets.reshape(-1, train_targets.shape[-1])
        val_targets = targets[:, train_time:]
        val_targets = val_targets.reshape(-1, val_targets.shape[-1])

        train_dataset = InpData(train_inputs, train_targets)
        val_dataset = InpData(val_inputs, val_targets)

        train_latent_vals = []
        val_latent_vals = []
        for group in latent_vals:
            train_latent_vals.append([])
            val_latent_vals.append([])
            for x in group:
                train_latent_vals[-1].append(x[:, :train_time].reshape(-1))
                val_latent_vals[-1].append(x[:, train_time:].reshape(-1))

        if return_latents:
            return train_dataset, val_dataset, train_latent_vals, val_latent_vals
        else:
            return train_dataset, val_dataset
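
A minimal usage sketch (hypothetical argument values; `data` and `InpData` are assumed to be available from the same module as `dataset`): build the train/validation datasets and wrap them in DataLoaders.

from torch.utils.data import DataLoader

train_dataset, val_dataset = dataset(num_trials=16, n_time=1000, Inp_dim=50)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
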
Example No. 6
trace.save('resnet18.pt')
print('---')

print(trace.graph)

data = torch.jit.load('resnet18.pt')

input = torch.rand(64, 3, 224, 224)

s = 0
s2 = 0

for i in range(0, 30):
    t = time.time()
    data(input)
    s += time.time() - t

    t = time.time()
    model(input)
    s2 += time.time() - t

print('JIT: {}'.format(s))
print('Python: {}'.format(s2))

s = 0
s2 = 0

for i in range(0, 30):
    t = time.time()
    data(input)
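
A more robust timing sketch (an alternative, not part of the original snippet): average over several runs with time.perf_counter(), which is better suited to short benchmarks than time.time(), and run under torch.no_grad() so autograd bookkeeping is excluded.

import time
import torch

def benchmark(fn, x, runs=30):
    # Average wall-clock time of fn(x) over `runs` calls
    with torch.no_grad():
        start = time.perf_counter()
        for _ in range(runs):
            fn(x)
    return (time.perf_counter() - start) / runs

print('JIT: {:.4f}s'.format(benchmark(data, input)))
print('Python: {:.4f}s'.format(benchmark(model, input)))
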
Example No. 7
LOG_FOUT = open(os.path.join(save_dir, 'log_test.txt'), 'w')
LOG_FOUT.write(str(save_dir) + '\n')
viz_dir = os.path.join(save_dir, 'visualize')
train_viz = os.path.join(viz_dir, 'train')
test_viz = os.path.join(viz_dir, 'test')
os.makedirs(train_viz, exist_ok=True)
os.makedirs(test_viz, exist_ok=True)

os.environ['CUDA_VISIBLE_DEVICES'] = '3'
if not test_model:
    raise NameError('please set the test_model file to choose the checkpoint!')

# read dataset
trainset = data(root=data_dir, is_train=True, data_len=100)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False,
                                          num_workers=8,
                                          drop_last=False)
testset = data(root=data_dir, is_train=False, data_len=100)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=8,
                                         drop_last=False)

# define model
net = model.attention_net(topN=PROPOSAL_NUM)
log_string('Loading {}'.format(test_model))
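
A minimal sanity-check sketch (an assumption, not from the original script; each dataset item is assumed to be an (image, label) pair): pull one batch from testloader and inspect its shapes.

imgs, labels = next(iter(testloader))
print(imgs.shape, labels.shape)
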
Example No. 8
def main():
    model = Tiny(200)
    model = init_weight(model)
    model.to(device)

    if not os.path.exists('./Tiny_IBP_Trained_Models_lr_0.001_clean_train'):
        os.makedirs('./Tiny_IBP_Trained_Models_lr_0.001_clean_train')
    method = 'BCP_model'
    output_dir = './Tiny_IBP_Trained_Models_lr_0.001_clean_train/'

    start_epoch = 0
    epochs = 500

    batch_counter = 0
    LR = args.lr
    optimizer = optim.Adam(model.parameters(), lr=LR)

    EPS_TRAIN = 1 / 255
    EPS_TEST = 1 / 255
    eps_sch = generate_epsilon_schedule_ImageNet(EPS_TRAIN)
    kappa_sch = generate_kappa_schedule_ImageNet()
# ***********************************dataset setup ***********************************************

    train_dataset = data('train', transform=transforms.Compose([
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()]))
    val_dataset = data('val', transform=transforms.Compose([transforms.ToTensor()]))

    batch_size = 32

    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)

# ************************************start training***********************************************

    clean_err_list = []
    normal_loss_list = []
    reg_loss_list = []

  
    a_txt = open(args.out, 'w')
    print(args, file=a_txt)
    a_txt.close()

    for epoch in range(start_epoch, epochs):
        print("This is ## Tiny ImageNet ## epoch: ", epoch)
        robust_loss = 0
        robust_err = 0
        if epoch >= args.warm:
            model, normal_loss, robust_loss, reg_loss = robust_train(
                train_loader, model, eps_sch, device, kappa_sch, batch_counter, optimizer)
        else:
            model, normal_loss, reg_loss = clean_train(train_loader, model, device, opt=optimizer)
        batch_counter += 1
        test_loss, prec1, prec5 = clean_test(val_loader, model, device)
        if epoch >= args.warm:
            robust_err, robust_loss = robust_test(val_loader, model, EPS_TEST, device)

        save_name = os.path.join(output_dir, '{}_epoch_{}_robust_{}.h5'.format(method, epoch, 1 - robust_err))
        save_net(save_name, model)

        clean_err_list.append(test_loss)
        normal_loss_list.append(normal_loss)
        reg_loss_list.append(reg_loss)
        if epoch == args.drop:
            for param_group in optimizer.param_groups:
                param_group["lr"] = LR * 0.1

        train_loss_txt = open(args.out, 'a')
        train_loss_txt.write('\nepoch %s ' % str(epoch))
        train_loss_txt.write('\nnormal_loss %s ' % str(normal_loss_list[epoch]))
        train_loss_txt.write('\nreg_loss %s ' % str(reg_loss_list[epoch]))
        train_loss_txt.write('\nprec5 %s ' % str(prec5))
        train_loss_txt.write('\nprec1 %s ' % str(prec1))
        train_loss_txt.write('\nrobust prec1 %s ' % str(1 - robust_err))
        train_loss_txt.write('\nrobust loss %s ' % str(robust_loss))
        train_loss_txt.write('\n')
        train_loss_txt.close()
        print('epoch %s ' % str(epoch))
        print('normal_loss %s ' % str(normal_loss_list[epoch]))
        print('reg_loss %s ' % str(reg_loss_list[epoch]))
        print('prec5 %s ' % str(prec5))
        print('prec1 %s ' % str(prec1))
        print('robust prec1 %s ' % str(1 - robust_err))
        print('robust loss %s ' % str(robust_loss))
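
A minimal entry-point sketch (an assumption; the original snippet ends inside the training loop): guard the call so the module can be imported without immediately starting training.

if __name__ == '__main__':
    main()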