# Esempio n. 1 (Example no. 1)
# 0
def train():
    """Train the SVM model with SGD, printing the mean loss each epoch.

    Relies on module-level objects defined elsewhere in this file:
    ``dataset`` (with ``get_vocab_size()``), ``collate_fn``, ``config``
    (hyper-parameters: batch, embedding, strmaxlen, output_size, lr,
    epoch, c), ``SVM``, ``DataLoader`` and ``optim``.
    """
    train_loader = DataLoader(dataset=dataset,
                              batch_size=config.batch,
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=0)

    model = SVM(config.embedding, config.strmaxlen, dataset.get_vocab_size(), config.output_size)
    optimizer = optim.SGD(model.parameters(), lr=config.lr)
    model.train()
    for epoch in range(config.epoch):
        sum_loss = 0.0
        # The original enumerated the loader but never used the batch index.
        for data, labels in train_loader:
            optimizer.zero_grad()
            output = model(data).squeeze()

            # Hinge loss plus an L2 penalty (c / 2) * ||w||^2.  Summing the
            # squared weights directly replaces the original reshape +
            # 1x1 matrix product (`w.t() @ w` then `tmp[0][0]`) — same
            # scalar, simpler graph.
            loss = model.loss(output, labels)
            weight = model.weight.squeeze()
            loss = loss + config.c * (weight * weight).sum() / 2.0

            loss.backward()
            optimizer.step()

            # .item() extracts the Python float without keeping graph refs.
            sum_loss += loss.item()

        # NOTE(review): divides the summed batch losses by the dataset size;
        # this is the per-sample mean only if model.loss sums (not averages)
        # over the batch — confirm against the SVM.loss implementation.
        print("Epoch: {:4d}\tloss: {}".format(epoch, sum_loss / len(dataset)))
# Esempio n. 2 (Example no. 2)
# 0
        # Move the model to GPU when requested via the CLI flag.
        if args.gpu:
            model = model.cuda()

        # Load pre-processed training data from disk as torch tensors;
        # targets are cast to int64 (the dtype torch expects for class labels).
        dataset = torch.from_numpy(np.load("../output/data/dataset_train.npy"))
        targets = torch.from_numpy(
            np.int64(np.load("../output/data/target_train.npy")))
        # The test split is kept as NumPy arrays here — presumably converted
        # to tensors later in this function (not visible in this chunk).
        dataset_test = np.load(dataset_path)
        targets_test = np.int64(np.load(target_path))
        # Pick log files and optimizer depending on whether L2 regularization
        # (implemented via Adam's weight_decay) is enabled.
        if args.L2norm:
            log_test = setup_logger(
                0, 'test_log_norm',
                os.path.join(args.log_dir, 'test_log_norm.txt'))
            log = setup_logger(
                0, 'train_log_norm',
                os.path.join(args.log_dir, 'train_log_norm.txt'))
            # NOTE(review): weight_decay=10 is unusually large (typical
            # values are 1e-4..1e-2) — confirm this is intentional.
            optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=10)
        else:
            log_test = setup_logger(0, 'test_log',
                                    os.path.join(args.log_dir, 'test_log.txt'))
            log = setup_logger(0, 'train_log',
                               os.path.join(args.log_dir, 'train_log.txt'))
            optimizer = Adam(model.parameters(), lr=args.lr)
        # Bookkeeping for best-model tracking / overfitting detection
        # (updated later in the loop, outside this chunk).
        max_accuracy = 0.0
        overfitting_cnt = 0
        # Per-metric output files.
        # NOTE(review): these handles are never closed in the visible code —
        # consider `with` blocks if the enclosing function's structure allows.
        f_accuracy_train = open(os.path.join(args.log_dir, 'acc_train.txt'),
                                'w')
        f_accuracy_test = open(os.path.join(args.log_dir, 'acc_test.txt'), 'w')
        f_loss = open(os.path.join(args.log_dir, 'loss.txt'), 'w')

        # # code for batch training
        # torch_dataset = data.TensorDataset(data_tensor=dataset, target_tensor=targets)