Example #1
def disagg_fold(dataset, fold_num, lr, p):
    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=fold_num)
    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()
    train_aggregate = train[:, 0, :, :].reshape(train.shape[0], 1, -1,
                                                train.shape[3])
    valid_aggregate = valid[:, 0, :, :].reshape(valid.shape[0], 1, -1,
                                                train.shape[3])
    test_aggregate = test[:, 0, :, :].reshape(test.shape[0], 1, -1,
                                              train.shape[3])

    out_train, out_valid, out_test = preprocess(train, valid, test)

    loss_func = nn.L1Loss()
    model = AppliancesCNN(len(ORDER))
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    if cuda_av:
        model = model.cuda()
        loss_func = loss_func.cuda()

    inp = Variable(torch.Tensor(train_aggregate), requires_grad=False)
    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        inp = inp.cuda()
        valid_inp = valid_inp.cuda()
        test_inp = test_inp.cuda()
    valid_out = torch.cat([
        out_valid[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])
    test_out = torch.cat([
        out_test[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])
    train_out = torch.cat([
        out_train[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])

    valid_pred = {}
    train_pred = {}
    test_pred = {}
    train_losses = {}
    test_losses = {}
    valid_losses = {}

    params = [inp, p]
    for a_num, appliance in enumerate(ORDER):
        params.append(out_train[a_num])

    if cuda_av:
        train_out = train_out.cuda()

    for t in range(1, num_iterations + 1):

        pred = model(*params)
        optimizer.zero_grad()
        loss = loss_func(pred, train_out)

        if t % 500 == 0:

            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = model(*valid_params)
            valid_loss = loss_func(valid_pr, valid_out)

            if cuda_av:
                test_inp = test_inp.cuda()
            test_params = [test_inp, -2]
            for i in range(len(ORDER)):
                test_params.append(None)
            test_pr = model(*test_params)
            test_loss = loss_func(test_pr, test_out)

            test_losses[t] = test_loss.item()
            valid_losses[t] = valid_loss.item()
            train_losses[t] = loss.item()
            # np.save("./baseline/p_50_loss")

            if t % 1000 == 0:
                valid_pr = torch.clamp(valid_pr, min=0.)
                valid_pred[t] = valid_pr
                test_pr = torch.clamp(test_pr, min=0.)
                test_pred[t] = test_pr
                train_pr = pred
                train_pr = torch.clamp(train_pr, min=0.)
                train_pred[t] = train_pr

            print("Round:", t, "Training Error:", loss.data[0],
                  "Validation Error:", valid_loss.data[0], "Test Error:",
                  test_loss.data[0])

        loss.backward()
        optimizer.step()

    train_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        train_pred[t] = torch.split(train_pred[t], train_aggregate.shape[0])
        train_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][
                    appliance_num].cpu().data.numpy().reshape(
                        -1, train.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][
                    appliance_num].data.numpy().reshape(-1, train.shape[3])

    valid_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][
                    appliance_num].cpu().data.numpy().reshape(
                        -1, train.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][
                    appliance_num].data.numpy().reshape(-1, train.shape[3])

    test_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_pred[t] = torch.split(test_pred[t], test_aggregate.shape[0])
        test_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][appliance_num].cpu(
                ).data.numpy().reshape(-1, train.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][
                    appliance_num].data.numpy().reshape(-1, train.shape[3])

    # store ground truth for the train, validation and test sets
    train_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        train_gt_fold[appliance_num] = train[:,
                                             APPLIANCE_ORDER.
                                             index(appliance), :, :].reshape(
                                                 train_aggregate.shape[0], -1,
                                                 1).reshape(
                                                     -1, train.shape[3])

    valid_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        valid_gt_fold[appliance_num] = valid[:,
                                             APPLIANCE_ORDER.
                                             index(appliance), :, :].reshape(
                                                 valid_aggregate.shape[0], -1,
                                                 1).reshape(
                                                     -1, train.shape[3])

    test_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        test_gt_fold[appliance_num] = test[:,
                                           APPLIANCE_ORDER.
                                           index(appliance), :, :].reshape(
                                               test_aggregate.shape[0], -1,
                                               1).reshape(-1, train.shape[3])

    # calculate the error for the train, validation and test sets
    train_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        train_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            train_error[t][appliance] = mean_absolute_error(
                train_fold[t][appliance_num], train_gt_fold[appliance_num])

    valid_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            valid_error[t][appliance] = mean_absolute_error(
                valid_fold[t][appliance_num], valid_gt_fold[appliance_num])

    test_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            test_error[t][appliance] = mean_absolute_error(
                test_fold[t][appliance_num], test_gt_fold[appliance_num])

    return train_fold, valid_fold, test_fold, train_error, valid_error, test_error, train_losses, valid_losses, test_losses
Example #2
    def loss(self, scores, targets):
        # loss = nn.MSELoss()(scores, targets)
        loss = nn.L1Loss()(scores, targets)
        return loss
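
For reference, a minimal self-contained sketch of the nn.L1Loss call pattern that the examples in this collection rely on; the tensor shapes below are purely illustrative.

import torch
import torch.nn as nn

criterion = nn.L1Loss()                       # mean absolute error, averaged over all elements
criterion_sum = nn.L1Loss(reduction='sum')    # summed instead of averaged
criterion_none = nn.L1Loss(reduction='none')  # per-element losses, same shape as the inputs

pred = torch.randn(8, 4, requires_grad=True)
target = torch.randn(8, 4)
loss = criterion(pred, target)                # 0-dim tensor; loss.item() yields a Python float
loss.backward()                               # gradients flow back into pred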
Example #3
    def __init__(self):
        super(VGGLoss, self).__init__()
        self.vgg = Vgg19().cuda().eval()
        self.criterion = nn.L1Loss()
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
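
The constructor above only wires up the pieces; as a sketch (assuming Vgg19 returns a list of five feature maps, one per entry in self.weights), the forward pass of such a perceptual loss typically looks like this:

    def forward(self, x, y):
        # feature maps of the prediction and the target
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        # weighted L1 distance between corresponding feature maps
        for w, fx, fy in zip(self.weights, x_vgg, y_vgg):
            loss += w * self.criterion(fx, fy.detach())
        return loss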
Example #4
    h1 = 32
    num_layers = 2
    learning_rate = 1e-2
    dtype = torch.float

    model = bigLSTM(length_train,
                    h1,
                    batch_size=1,
                    output_dim=horizon,
                    num_layers=num_layers)

    epoch = 2
    use_gpu = torch.cuda.is_available()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.L1Loss()

    for asset_to_predict in clean_forex.columns:
        prediction = forecast_asset(model, optimizer, criterion,
                                    asset_to_predict, returns, window,
                                    length_train, horizon, epoch)
        filename = RAW_PATH + 'lstm_' + str(
            horizon) + '_' + asset_to_predict + '.csv'
        prediction.to_csv(filename)
        print(asset_to_predict + ' done')

    list_lstm_predictions = glob.glob(RAW_PATH + 'lstm_*')

    prediction_df = pd.DataFrame()
    for file in list_lstm_predictions:
        current_df = pd.read_csv(file, index_col=0, header=[0, 1])
Example #5
def trainModel(name, mode, XS, YS):
    #     YS = YS[:,0,:,:]
    print('Model Training Started ...', time.ctime())
    print('TIMESTEP_IN, TIMESTEP_OUT', TIMESTEP_IN, TIMESTEP_OUT)
    model = getModel(name)
    summary(model, (GWN_CHANNEL, N_NODE, GWN_TIMESTEP_OUT),
            device="cuda:{}".format(GPU))
    XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(
        device)
    trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch)
    trainval_size = len(trainval_data)
    train_size = int(trainval_size * (1 - TRAINVALSPLIT))
    print('XS_torch.shape:  ', XS_torch.shape)
    print('YS_torch.shape:  ', YS_torch.shape)
    train_data = torch.utils.data.Subset(trainval_data,
                                         list(range(0, train_size)))
    val_data = torch.utils.data.Subset(trainval_data,
                                       list(range(train_size, trainval_size)))
    train_iter = torch.utils.data.DataLoader(train_data,
                                             BATCHSIZE,
                                             shuffle=True)
    val_iter = torch.utils.data.DataLoader(val_data, BATCHSIZE, shuffle=True)
    if LOSS == "GraphWaveNetLoss":
        criterion = Metrics.masked_mae
    elif LOSS == 'MSE':
        criterion = nn.MSELoss()
    else:
        criterion = nn.L1Loss()
    if OPTIMIZER == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=LEARN)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=LEARN)
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)

    min_val_loss = np.inf
    wait = 0
    clip = 5
    for epoch in range(EPOCH):
        starttime = datetime.now()
        loss_sum, n = 0.0, 0
        model.train()
        for x, y in train_iter:
            optimizer.zero_grad()
            x = nn.functional.pad(x, (1, 0, 0, 0)).to(device)
            y = y[:, 0, :, :].to(device)
            y_pred = model(x)
            real = torch.unsqueeze(y, dim=1)
            predict = y_pred
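            # the trailing 0.0 is the null value expected by Metrics.masked_mae (see the
            # commented-out call below); the plain nn criteria take only (pred, target)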
            loss = criterion(predict, real, 0.0)
            #             loss = Metrics.masked_mae(predict, real,0.0)
            print('loss ok')
            loss.backward()
            if clip is not None:
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            loss_sum += loss.item() * real.shape[0]
            n += real.shape[0]


#         scheduler.step()
        train_loss = loss_sum / n
        val_loss = evaluateModel(model, criterion, val_iter)
        if val_loss < min_val_loss:
            wait = 0
            min_val_loss = val_loss
            torch.save(model.state_dict(), PATH + '/' + name + '.pt')
        else:
            wait += 1
            if wait == PATIENCE:
                print('Early stopping at epoch: %d' % epoch)
                break
        endtime = datetime.now()
        epoch_time = (endtime - starttime).seconds
        print("epoch", epoch, "time used:", epoch_time, " seconds ",
              "train loss:", train_loss, "validation loss:", val_loss)
        with open(PATH + '/' + name + '_log.txt', 'a') as f:
            f.write("%s, %d, %s, %d, %s, %.10f, %s, %.10f\n" %
                    ("epoch", epoch, "time used", epoch_time, "train loss",
                     train_loss, "validation loss:", val_loss))

    torch_score = evaluateModel(model, criterion, train_iter)
    YS_pred = predictModel(
        model,
        torch.utils.data.DataLoader(trainval_data, BATCHSIZE, shuffle=False))
    print('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape)
    YS, YS_pred = scaler.inverse_transform(
        np.squeeze(YS)), scaler.inverse_transform(np.squeeze(YS_pred))
    print('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape)
    MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred)
    with open(PATH + '/' + name + '_prediction_scores.txt', 'a') as f:
        f.write("%s, %s, Torch MSE, %.10e, %.10f\n" %
                (name, mode, torch_score, torch_score))
        f.write("%s, %s, MSE, RMSE, MAE, MAPE, %.10f, %.10f, %.10f, %.10f\n" %
                (name, mode, MSE, RMSE, MAE, MAPE))
    print('*' * 40)
    print("%s, %s, Torch MSE, %.10e, %.10f\n" %
          (name, mode, torch_score, torch_score))
    print("%s, %s, MSE, RMSE, MAE, MAPE, %.10f, %.10f, %.10f, %.10f\n" %
          (name, mode, MSE, RMSE, MAE, MAPE))
    print('Model Training Ended ...', time.ctime())
Example #6
import torch
import torch.nn as nn

maeloss = nn.L1Loss()
mseloss = nn.MSELoss()
softplus = nn.Softplus()


class DecomposeLossCalculator:
    def __init__(self):
        pass

    @staticmethod
    def content_loss(y: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
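        # elementwise mean absolute error, equivalent to maeloss(y, t) defined above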
        return torch.mean(torch.abs(y - t))

    @staticmethod
    def adversarial_disloss(discriminator: nn.Module, y: torch.Tensor,
                            t: torch.Tensor) -> torch.Tensor:
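        # non-saturating GAN discriminator loss: softplus(-D(real)) + softplus(D(fake)),
        # accumulated over each output in the lists returned by the discriminator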
        sum_loss = 0
        fake_list = discriminator(y)
        real_list = discriminator(t)

        for fake, real in zip(fake_list, real_list):
            loss = torch.mean(softplus(-real)) + torch.mean(softplus(fake))
            sum_loss += loss

        return sum_loss

    @staticmethod
    def adversarial_genloss(discriminator: nn.Module,
Example #7
def main(args):
    # parameters
    img_size = 128
    z_dim = 128
    lamb_obj = 1.0
    lamb_img = 0.1
    num_classes = 184 if args.dataset == 'coco' else 179
    num_obj = 8 if args.dataset == 'coco' else 31

    args.out_path = os.path.join(args.out_path, args.dataset, str(img_size))
    # data loader
    train_data = get_dataset(args.dataset, img_size)

    num_gpus = torch.cuda.device_count()
    num_workers = 2
    if num_gpus > 1:
        parallel = True
        args.batch_size = args.batch_size * num_gpus
        num_workers = num_workers * num_gpus
    else:
        parallel = False

    dataloader = torch.utils.data.DataLoader(train_data,
                                             batch_size=args.batch_size,
                                             drop_last=True,
                                             shuffle=True,
                                             num_workers=num_workers)

    # Load model
    device = torch.device('cuda')
    netG = ResnetGenerator128(num_classes=num_classes, output_dim=3).to(device)
    netD = CombineDiscriminator128(num_classes=num_classes).to(device)

    parallel = True
    if parallel:
        netG = DataParallelWithCallback(netG)
        netD = nn.DataParallel(netD)

    g_lr, d_lr = args.g_lr, args.d_lr
    gen_parameters = []
    for key, value in dict(netG.named_parameters()).items():
        if value.requires_grad:
            if 'mapping' in key:
                gen_parameters += [{'params': [value], 'lr': g_lr * 0.1}]
            else:
                gen_parameters += [{'params': [value], 'lr': g_lr}]

    g_optimizer = torch.optim.Adam(gen_parameters, betas=(0, 0.999))

    dis_parameters = []
    for key, value in dict(netD.named_parameters()).items():
        if value.requires_grad:
            dis_parameters += [{'params': [value], 'lr': d_lr}]
    d_optimizer = torch.optim.Adam(dis_parameters, betas=(0, 0.999))

    # make dirs
    if not os.path.exists(args.out_path):
        os.makedirs(args.out_path)
    if not os.path.exists(os.path.join(args.out_path, 'model/')):
        os.makedirs(os.path.join(args.out_path, 'model/'))
    writer = SummaryWriter(os.path.join(args.out_path, 'log'))

    logger = setup_logger("lostGAN", args.out_path, 0)
    logger.info(netG)
    logger.info(netD)

    start_time = time.time()
    vgg_loss = VGGLoss()
    vgg_loss = nn.DataParallel(vgg_loss)
    l1_loss = nn.DataParallel(nn.L1Loss())
    for epoch in range(args.total_epoch):
        netG.train()
        netD.train()

        for idx, data in enumerate(tqdm(dataloader)):
            real_images, label, bbox = data
            real_images, label, bbox = real_images.to(device), label.long().to(
                device).unsqueeze(-1), bbox.float()

            # update D network
            netD.zero_grad()
            real_images, label = real_images.to(device), label.long().to(
                device)
            d_out_real, d_out_robj = netD(real_images, bbox, label)
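            # hinge GAN loss for the discriminator: real image/object scores are pushed
            # above +1, fake scores (computed further down) below -1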
            d_loss_real = torch.nn.ReLU()(1.0 - d_out_real).mean()
            d_loss_robj = torch.nn.ReLU()(1.0 - d_out_robj).mean()

            z = torch.randn(real_images.size(0), num_obj, z_dim).to(device)
            fake_images = netG(z, bbox, y=label.squeeze(dim=-1))
            d_out_fake, d_out_fobj = netD(fake_images.detach(), bbox, label)
            d_loss_fake = torch.nn.ReLU()(1.0 + d_out_fake).mean()
            d_loss_fobj = torch.nn.ReLU()(1.0 + d_out_fobj).mean()

            d_loss = lamb_obj * (d_loss_robj + d_loss_fobj) + lamb_img * (
                d_loss_real + d_loss_fake)
            d_loss.backward()
            d_optimizer.step()

            # update G network
            if (idx % 1) == 0:
                netG.zero_grad()
                g_out_fake, g_out_obj = netD(fake_images, bbox, label)
                g_loss_fake = -g_out_fake.mean()
                g_loss_obj = -g_out_obj.mean()

                pixel_loss = l1_loss(fake_images, real_images).mean()
                feat_loss = vgg_loss(fake_images, real_images).mean()

                g_loss = g_loss_obj * lamb_obj + g_loss_fake * lamb_img + pixel_loss + feat_loss
                g_loss.backward()
                g_optimizer.step()

            if (idx + 1) % 500 == 0:
                elapsed = time.time() - start_time
                elapsed = str(datetime.timedelta(seconds=elapsed))
                logger.info("Time Elapsed: [{}]".format(elapsed))
                logger.info(
                    "Step[{}/{}], d_out_real: {:.4f}, d_out_fake: {:.4f}, g_out_fake: {:.4f} "
                    .format(epoch + 1, idx + 1, d_loss_real.item(),
                            d_loss_fake.item(), g_loss_fake.item()))
                logger.info(
                    "             d_obj_real: {:.4f}, d_obj_fake: {:.4f}, g_obj_fake: {:.4f} "
                    .format(d_loss_robj.item(), d_loss_fobj.item(),
                            g_loss_obj.item()))
                logger.info(
                    "             pixel_loss: {:.4f}, feat_loss: {:.4f}".
                    format(pixel_loss.item(), feat_loss.item()))

                writer.add_image(
                    "real images",
                    make_grid(real_images.cpu().data * 0.5 + 0.5, nrow=4),
                    epoch * len(dataloader) + idx + 1)
                writer.add_image(
                    "fake images",
                    make_grid(fake_images.cpu().data * 0.5 + 0.5, nrow=4),
                    epoch * len(dataloader) + idx + 1)

                writer.add_scalars("D_loss_real", {
                    "real": d_loss_real.item(),
                    "robj": d_loss_robj.item()
                })
                writer.add_scalars("D_loss_fake", {
                    "fake": d_loss_fake.item(),
                    "fobj": d_loss_fobj.item()
                })
                writer.add_scalars("G_loss", {
                    "fake": g_loss_fake.item(),
                    "obj": g_loss_obj.item()
                })

        # save model
        if (epoch + 1) % 5 == 0:
            torch.save(
                netG.state_dict(),
                os.path.join(args.out_path, 'model/',
                             'G_%d.pth' % (epoch + 1)))
Example #8
    def train(self):

        groups = [
            dict(params=self.model.prediction_weights,
                 weight_decay=self.config.hp.weight_decay)
        ]

        if self.model.hidden_states:
            groups.append({
                'params': self.model.hidden_states,
                'weight_decay': 0
            })

        if self.config.model.impute_missing:
            groups.append({
                'params': self.model.imputation_weights,
                'lr': self.config.hp.imputation_lr
            })

        optimizer = optim.Adam(groups,
                               lr=self.config.hp.initial_lr,
                               betas=(self.config.hp.beta1,
                                      self.config.hp.beta2),
                               eps=1e-08,
                               weight_decay=self.config.hp.weight_decay)

        lr_decayer = ReduceLROnPlateau(
            optimizer,
            factor=self.config.hp.decay_lr,
            verbose=self.config.training.verbose,
            patience=self.config.training.lr_patience_decay,
            min_lr=self.config.hp.minimal_lr)

        MSEloss = nn.L1Loss()  # despite the variable name, this is an L1 (MAE) criterion

        for e in range(self.config.hp.n_epochs):
            train_loader = DataLoader(self.tsd_train,
                                      batch_size=self.config.hp.batch_size,
                                      shuffle=True,
                                      pin_memory=False)
            batch_number = len(train_loader)

            with Process('Epoch %i' % (e + 1), total=batch_number) as p_epoch:
                for i, train_batch in enumerate(train_loader):
                    self.model.train()
                    seqs_size = train_batch[-1].cuda(
                        self.config.experiment.gpu)
                    max_batch_length = torch.max(seqs_size)
                    input_train = init_cuda_sequences_batch(
                        train_batch[:-1], max_batch_length,
                        self.config.experiment.gpu)
                    mask_seq = input_train[-1]
                    x = input_train[0]
                    y = input_train[1]
                    if self.config.model.impute_missing:
                        m = input_train[2]
                        l = input_train[3]
                        x = self.model.imputation(x, m, l)
                    output = self.model(x, seqs_size)

                    mask_seq = torch.flatten(mask_seq)
                    y = torch.flatten(y)
                    if self.config.experiment.predict_all_timestep:
                        output = output.view(-1, output.size(-1))
                        masked_output = torch.flatten(
                            mask_seq.view(-1, 1) * output)

                    else:
                        output = torch.flatten(output)
                        masked_output = mask_seq * output
                    l = MSEloss(masked_output, y)
                    self.model.zero_grad()
                    # encoder
                    l.backward(retain_graph=True)
                    optimizer.step()
                    p_epoch.update(1)

                    if i and i % self.config.training.validation_freq == 0:
                        valid_loader = DataLoader(
                            dataset=self.tsd_valid,
                            batch_size=self.config.hp.batch_size,
                            shuffle=True,
                            pin_memory=False)
                        full_pred, full_gt, validation_loss = self.test(
                            self.model, valid_loader)

                        if self.config.training.verbose:
                            self._print(
                                "Epoch %i, iteration %s, Training loss %f" %
                                (e + 1, i, float(l.cpu())))
                            self._print("Validation: loss %f" %
                                        (validation_loss))

                        validation_criteria = validation_loss
                        lr_decayer.step(validation_loss)

                        if e == 0:
                            min_criteria_validation = validation_criteria
                        else:
                            if validation_criteria < min_criteria_validation:
                                min_criteria_validation = validation_criteria
                                self.model.save_model(
                                    epoch=e + 1,
                                    iteration=i,
                                    loss=validation_loss,
                                    use_datetime=self.config.training.
                                    save_in_timestamp_folder)

            p_epoch.succeed()
Example #9
    def create_network(self, blocks):
        models = nn.ModuleList()

        prev_filters = 3
        out_filters = []
        conv_id = 0
        for block in blocks:
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                continue
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size - 1) // 2 if is_pad else 0  # integer padding for nn.Conv2d
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters,
                                  filters,
                                  kernel_size,
                                  stride,
                                  pad,
                                  bias=False))
                    model.add_module('bn{0}'.format(conv_id),
                                     nn.BatchNorm2d(filters))
                    #model.add_module('bn{0}'.format(conv_id), BN2d(filters))
                else:
                    model.add_module(
                        'conv{0}'.format(conv_id),
                        nn.Conv2d(prev_filters, filters, kernel_size, stride,
                                  pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id),
                                     nn.LeakyReLU(0.1, inplace=True))
                elif activation == 'relu':
                    model.add_module('relu{0}'.format(conv_id),
                                     nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'maxpool':
                pool_size = int(block['size'])
                stride = int(block['stride'])
                if stride > 1:
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'avgpool':
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'softmax':
                model = nn.Softmax()
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'cost':
                if block['_type'] == 'sse':
                    model = nn.MSELoss(reduction='mean')
                elif block['_type'] == 'L1':
                    model = nn.L1Loss(reduction='mean')
                elif block['_type'] == 'smooth':
                    model = nn.SmoothL1Loss(reduction='mean')
                out_filters.append(1)
                models.append(model)
            elif block['type'] == 'reorg':
                stride = int(block['stride'])
                prev_filters = stride * stride * prev_filters
                out_filters.append(prev_filters)
                models.append(Reorg(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [
                    int(i) if int(i) > 0 else int(i) + ind for i in layers
                ]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                elif len(layers) == 2:
                    assert (layers[0] == ind - 1)
                    prev_filters = out_filters[layers[0]] + out_filters[
                        layers[1]]
                out_filters.append(prev_filters)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind - 1]
                out_filters.append(prev_filters)
                models.append(EmptyModule())
            elif block['type'] == 'connected':
                filters = int(block['output'])
                if block['activation'] == 'linear':
                    model = nn.Linear(prev_filters, filters)
                elif block['activation'] == 'leaky':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.LeakyReLU(0.1, inplace=True))
                elif block['activation'] == 'relu':
                    model = nn.Sequential(nn.Linear(prev_filters, filters),
                                          nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'region':
                loss = RegionLoss()
                anchors = block['anchors'].split(',')
                loss.anchors = [float(i) for i in anchors]
                loss.num_classes = int(block['classes'])
                loss.num_anchors = int(block['num'])
                loss.anchor_step = len(loss.anchors) // loss.num_anchors
                loss.object_scale = float(block['object_scale'])
                loss.noobject_scale = float(block['noobject_scale'])
                loss.class_scale = float(block['class_scale'])
                loss.coord_scale = float(block['coord_scale'])
                out_filters.append(prev_filters)
                models.append(loss)
            else:
                print('unknown type %s' % (block['type']))

        return models
Example #10
Y = train_labels_onehot

G = networks.Generator1(opt.g_input_size, opt.g_hidden_size, opt.g_output_size)
D = networks.Discriminator1(opt.d_input_size, opt.d_hidden_size, nclasses,
                            opt.d_output_size, opt.bit)

# print(G)
# print(D)
# print(H)
G.cuda()
D.cuda()
#H.cuda()

#loss
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionGAN = criterionGAN.cuda()
criterionL1 = criterionL1.cuda()
aux_criterion = nn.NLLLoss().cuda()

# Adam optimizer
G_optimizer = optim.Adam(G.parameters(),
                         lr=opt.lrG,
                         betas=(opt.beta1, opt.beta2))

params = [
    {
        'params': D.map1.parameters(),
        "lr": 1e-3
    },
    {
Example #11
    def test(self, model, dataloader, use_uncertain=False):
        model.eval()
        MSEloss = nn.L1Loss(reduction='none')  # per-element L1 (MAE) loss, despite the name
        with torch.no_grad():
            full_pred = []
            full_gt = []
            full_std = []
            full_loss = []
            nb_sequences = 0
            for j, valid_batch in enumerate(dataloader):
                seqs_size = valid_batch[-1].cuda(self.config.experiment.gpu)
                nb_sequences += seqs_size.size(0)
                max_batch_length = torch.max(seqs_size)
                input_train = init_cuda_sequences_batch(
                    valid_batch[:-1], max_batch_length,
                    self.config.experiment.gpu)
                mask_seq = input_train[-1]
                x = input_train[0]
                y = input_train[1]
                if self.config.model.impute_missing:
                    m = input_train[2]
                    l = input_train[3]
                    x = model.imputation(x, m, l)
                if self.config.experiment.predict_all_timestep:
                    y = y[:, :, -1]
                if use_uncertain:
                    outs = []

                    model.train()
                    n_iter = use_uncertain
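                    # run n_iter stochastic forward passes with the network kept in train
                    # mode, then use their mean as the prediction and their standard
                    # deviation as a simple uncertainty estimate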
                    for i in range(n_iter):
                        output = model(x, seqs_size)
                        if self.config.experiment.predict_all_timestep:
                            output = output[:, :, -1]

                        size = output.size()
                        outs.append(output)

                    outs = torch.cat(outs).view(n_iter, *size)
                    output = torch.mean(outs, dim=0)
                    std_out = torch.std(outs, dim=0).cpu().numpy()

                else:
                    output = model(x, seqs_size)
                    if self.config.experiment.predict_all_timestep:
                        output = output[:, :, -1]

                masked_output = mask_seq * torch.squeeze(output)

                l = MSEloss(masked_output, torch.squeeze(y)).cpu().numpy()

                pred = masked_output.cpu().numpy()

                y = y.cpu().numpy()
                seqs_size = seqs_size.cpu().numpy()
                for i, length in enumerate(seqs_size):
                    y_sample = y[i, :length]
                    pred_sample = pred[i, :length]
                    full_gt.append(y_sample.flatten())
                    full_pred.append(pred_sample.flatten())
                    full_loss.append(l[i, :length])
                    if use_uncertain:
                        full_std.append(std_out[i, :length].flatten())

        full_pred = np.hstack(full_pred)
        full_gt = np.hstack(full_gt)
        full_loss = np.mean(np.hstack(full_loss))
        if use_uncertain:
            full_std = np.hstack(full_std)

        if use_uncertain:
            return full_pred, full_gt, full_loss, full_std
        else:
            return full_pred, full_gt, full_loss
Example #12
def main(args):
    # Get device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Define model
    model = nn.DataParallel(network.Model()).to(device)
    print("Model Ha s Been Defined")
    num_param = sum(param.numel() for param in model.parameters())
    print('Number of Transformer-TTS Parameters:', num_param)

    # Get dataset
    dataset = TransformerTTSDataset()

    # Optimizer and loss
    optimizer = torch.optim.Adam(model.parameters(), lr=hp.lr)
    print("Defined Optimizer")

    # Get training loader
    training_loader = DataLoader(dataset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 collate_fn=collate_fn,
                                 drop_last=True,
                                 num_workers=cpu_count())
    print("Got Training Loader")

    try:
        checkpoint = torch.load(
            os.path.join(hp.checkpoint_path,
                         'checkpoint_%d.pth.tar' % args.restore_step))
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("\n------Model Restored at Step %d------\n" % args.restore_step)

    except:
        print("\n------Start New Training------\n")
        if not os.path.exists(hp.checkpoint_path):
            os.mkdir(hp.checkpoint_path)

    # Init logger
    if not os.path.exists(hp.logger_path):
        os.mkdir(hp.logger_path)

    # Training
    model = model.train()

    total_step = hp.epochs * len(training_loader)
    Time = np.array(list())
    Start = time.perf_counter()

    for epoch in range(hp.epochs):
        for i, data_of_batch in enumerate(training_loader):
            start_time = time.perf_counter()

            current_step = i + args.restore_step + \
                epoch * len(training_loader) + 1

            # Init
            optimizer.zero_grad()

            # Get Data
            character = torch.from_numpy(
                data_of_batch["texts"]).long().to(device)
            mel_input = torch.from_numpy(
                data_of_batch["mel_input"]).float().to(device)
            mel_target = torch.from_numpy(
                data_of_batch["mel_target"]).float().to(device)
            pos_text = torch.from_numpy(
                data_of_batch["pos_text"]).long().to(device)
            pos_mel = torch.from_numpy(
                data_of_batch["pos_mel"]).long().to(device)
            stop_target = pos_mel.eq(0).float().to(device)

            # Forward
            mel_pred, postnet_pred, _, stop_preds, _, _ = model.forward(
                character, mel_input, pos_text, pos_mel)

            # Cal Loss
            mel_loss = nn.L1Loss()(mel_pred, mel_target)
            mel_postnet_loss = nn.L1Loss()(postnet_pred, mel_target)
            stop_pred_loss = nn.MSELoss()(stop_preds, stop_target)
            total_loss = mel_loss + mel_postnet_loss + stop_pred_loss

            # Logger
            t_l = total_loss.item()
            m_l = mel_loss.item()
            m_p_l = mel_postnet_loss.item()
            s_l = stop_pred_loss.item()

            with open(os.path.join("logger", "total_loss.txt"),
                      "a") as f_total_loss:
                f_total_loss.write(str(t_l) + "\n")

            with open(os.path.join("logger", "mel_loss.txt"),
                      "a") as f_mel_loss:
                f_mel_loss.write(str(m_l) + "\n")

            with open(os.path.join("logger", "mel_postnet_loss.txt"),
                      "a") as f_mel_postnet_loss:
                f_mel_postnet_loss.write(str(m_p_l) + "\n")

            with open(os.path.join("logger", "stop_pred_loss.txt"),
                      "a") as f_s_loss:
                f_s_loss.write(str(s_l) + "\n")

            # Backward
            total_loss.backward()

            # Clipping gradients to avoid gradient explosion
            nn.utils.clip_grad_norm_(model.parameters(), 1.)

            # Update weights
            optimizer.step()
            current_learning_rate = adjust_learning_rate(
                optimizer, current_step)

            # Print
            if current_step % hp.log_step == 0:
                Now = time.perf_counter()

                str1 = "Epoch [{}/{}], Step [{}/{}], Mel Loss: {:.4f}, Mel PostNet Loss: {:.4f};".format(
                    epoch + 1, hp.epochs, current_step, total_step,
                    mel_loss.item(), mel_postnet_loss.item())
                str2 = "Stop Predicted Loss: {:.4f}, Total Loss: {:.4f}.".format(
                    stop_pred_loss.item(), total_loss.item())
                str3 = "Current Learning Rate is {:.6f}.".format(
                    current_learning_rate)
                str4 = "Time Used: {:.3f}s, Estimated Time Remaining: {:.3f}s.".format(
                    (Now - Start), (total_step - current_step) * np.mean(Time))

                print("\n" + str1)
                print(str2)
                print(str3)
                print(str4)

                with open(os.path.join("logger", "logger.txt"),
                          "a") as f_logger:
                    f_logger.write(str1 + "\n")
                    f_logger.write(str2 + "\n")
                    f_logger.write(str3 + "\n")
                    f_logger.write(str4 + "\n")
                    f_logger.write("\n")

            if current_step % hp.save_step == 0:
                torch.save(
                    {
                        'model': model.state_dict(),
                        'optimizer': optimizer.state_dict()
                    },
                    os.path.join(hp.checkpoint_path,
                                 'checkpoint_%d.pth.tar' % current_step))
                print("save model at step %d ..." % current_step)

            end_time = time.perf_counter()
            Time = np.append(Time, end_time - start_time)
            if len(Time) == hp.clear_Time:
                temp_value = np.mean(Time)
                Time = np.delete(Time, [i for i in range(len(Time))],
                                 axis=None)
                Time = np.append(Time, temp_value)
Example #13
    def __init__(self, opt):
        super(MICSNet, self).__init__(opt)
        train_opt = opt['train']
        self.pretrain = opt['sensing_matrix']['pretrain']
        self.stages = opt['network_G']['stages']
        self.decoder_name = opt['network_G']['which_model_G']
        self.encoder_name = opt['network_G']['which_model_encoder']
        self.proj_method = opt['sensing_matrix']['proj_method']
        self.sensing_matrix = np.load(opt['sensing_matrix']['root'])
        # self.sensing_matrix = np.expand_dims(self.sensing_matrix, axis=0)
        # self.sensing_matrix = np.repeat(self.sensing_matrix, opt['datasets']['train']['batch_size'], axis=0)
        self.sensing_matrix = torch.from_numpy(self.sensing_matrix).float().to(self.device)

        # define networks and load pretrained models
        self.encoder_finetune = opt['network_G']['which_model_encoder'] is not None
        self.decoder_finetune = 'finetune' in opt['name']

        self.Encoder = networks.define_Encoder(opt).to(self.device)
        self.netG = networks.define_G(opt).to(self.device)  # G

        if self.is_train:
            if self.encoder_finetune:
                self.Encoder.train()
            self.netG.train()
        self.load()

        # define losses, optimizer and scheduler
        if self.is_train:
            # self.MILoss = MutualInfoLoss(opt)
            self.MILoss = MutualInfoLoss(opt)

            # G pixel loss
            if train_opt['pixel_weight'] > 0:
                l_pix_type = train_opt['pixel_criterion']
                if l_pix_type == 'l1':
                    self.cri_pix = nn.L1Loss().to(self.device)
                elif l_pix_type == 'l2':
                    self.cri_pix = nn.MSELoss().to(self.device)
                elif l_pix_type == 'charbonnier':
                    self.cri_pix = L1CharbonnierLoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))
                self.l_pix_w = train_opt['pixel_weight']
            else:
                print('Remove pixel loss.')
                self.cri_pix = None

            # G consistant loss
            if train_opt['consistant_weight'] > 0:
                l_cons_type = train_opt['consistant_criterion']
                if l_cons_type == 'l1':
                    self.cri_cons = nn.L1Loss().to(self.device)
                elif l_cons_type == 'l2':
                    self.cri_cons = nn.MSELoss().to(self.device)
                elif l_cons_type == 'charbonnier':
                    self.cri_cons = L1CharbonnierLoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_cons_type))
                self.l_cons_w = train_opt['consistant_weight']
            else:
                print('Remove consistant loss.')
                self.cri_cons = None

            # G feature loss
            if train_opt['feature_weight'] > 0:
                l_fea_type = train_opt['feature_criterion']
                if l_fea_type == 'l1':
                    self.cri_fea = nn.L1Loss().to(self.device)
                elif l_fea_type == 'l2':
                    self.cri_fea = nn.MSELoss().to(self.device)
                elif l_fea_type == 'charbonnier':
                    self.cri_fea = L1CharbonnierLoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))
                self.l_fea_w = train_opt['feature_weight']
            else:
                print('Remove feature loss.')
                self.cri_fea = None
            if self.cri_fea:  # load VGG perceptual loss
                self.netF = networks.define_F(opt, use_bn=False).to(self.device)

            # optimizers
            # G
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
            optim_params = self.netG.parameters()
            self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'],
                                                weight_decay=wd_G, betas=(train_opt['beta1_G'], 0.999))
            self.optimizers.append(self.optimizer_G)

            # Encoder
            optim_params_encoder = self.Encoder.parameters()
            self.optimizer_encoder = torch.optim.Adam(optim_params_encoder, lr=train_opt['lr_G'],
                                                weight_decay=wd_G, betas=(train_opt['beta1_G'], 0.999))
            self.optimizers.append(self.optimizer_encoder)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer,
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError('MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()

        print('---------- Model initialized ------------------')
        self.print_network()
        print('-----------------------------------------------')
Example #14
    def __init__(self, opt):
        """Initialize the FET-model class.

        Parameters:
            opt (Option dict) -- stores all the experiment flags;
        """

        BaseModel.__init__(self, opt)

        self.opt = opt
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = [
            'GE', 'D', 'G_GAN', 'D_GAN', 'recons_L1', 'transfer_L1', 'code_L1',
            'GP'
        ]

        if self.opt['isTrain']:
            # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
            self.visual_names = [
                'source', 'refs', 'target', 'reconstruction', 'transfered'
            ]
            # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
            self.model_names = ['G', 'E', 'D']
        else:
            self.visual_names = ['source', 'refs', 'transfered']
            self.model_names = ['G', 'E']

        # define networks (Generator, Encoder and discriminator)
        self.netG = nets.define_G(opt['input_nc'], opt['latent_nc'],
                                  opt['ngf'], opt['ng_downsample'],
                                  opt['ng_upsample'], opt['init_type'],
                                  opt['init_gain'], self.gpu_ids)
        # define encoder
        self.netE = nets.define_E(opt['input_nc'], opt['latent_nc'],
                                  opt['nef'], opt['ne_downsample'],
                                  opt['init_type'], opt['init_gain'],
                                  self.gpu_ids)
        if self.isTrain:
            # define discriminator
            self.netD = nets.define_D(opt['input_nc'], opt['ndf'],
                                      opt['num_cls'], opt['nd_downsample'],
                                      opt['init_type'], opt['init_gain'],
                                      self.gpu_ids)

            # define loss functions
            if self.opt['GAN_type'] == 'vanilla':
                self.criterionGAN = loss.GANLoss().to(self.device)
            elif self.opt['GAN_type'] == 'hinge':
                self.criterionGAN = loss.GANLoss_hinge().to(self.device)
            else:
                print("Invalid GAN loss type.")

            if self.opt['finetune']:
                self.criterionGAN = loss.GANLoss_hinge_finetune().to(
                    self.device)

            self.criterionRecons = nn.L1Loss()
            self.criterionTransfer = nn.L1Loss()
            self.criterionCode = nn.L1Loss()
            self.criterionGP = loss.GPLoss().to(self.device)

            # initialize optimizers;
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_E = torch.optim.Adam(self.netE.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            self.optimizers.append(self.optimizer_E)
Example #15
        nw = w // ratio
        temp = nn.functional.interpolate(img, [nh, nw], mode='nearest')
        scaled_imgs.append(temp)
    return scaled_imgs


if __name__ == '__main__':
    dataset = KITTIDataset()
    # print(len(dataset))
    TrainLoader = torch.utils.data.DataLoader(dataset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=8)
    net = DispNet()
    net.to(torch.device("cuda:0"))
    loss_function = nn.L1Loss().cuda()
    optimizer = optim.Adam(net.parameters(), lr=0.0001)
    alpha = 0.3
    for epoch in range(50):
        for sample_batched in TrainLoader:
            # print("training sample for KITTI")

            # print(sample_batched["left_img"].shape)
            net.zero_grad()
            # print(sample_batched["right_img"].shape)
            left_original = sample_batched["left_img"]
            right_original = sample_batched["right_img"]

            # pyramid = tuple(pyramid_gaussian(left_original,  max_layer=4, downscale=2, multichannel=True))
            # # print("pyramid",size(pyramid))
Example #16
def main():
    # check if cuda available
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # define dataset and dataloader
    train_dataset = FlowDataset(mode='train')
    test_dataset = FlowDataset(mode='test')
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=16,
                              shuffle=True,
                              num_workers=4)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=16,
                             shuffle=False,
                             num_workers=4)

    # hyper-parameters
    num_epochs = 10  #20
    lr = 0.001
    input_size = 17  # do not change input size
    hidden_size = 128
    num_layers = 2
    dropout = 0.1

    model = FlowLSTM(input_size=input_size,
                     hidden_size=hidden_size,
                     num_layers=num_layers,
                     dropout=dropout).to(device)

    criterion = nn.MSELoss()

    # define optimizer for lstm model
    optim = Adam(model.parameters(), lr=lr)
    lossess = []
    for epoch in range(num_epochs):
        losses = []
        for n_batch, (in_batch, label) in enumerate(train_loader):
            in_batch, label = in_batch.to(device), label.to(device)

            # train LSTM

            optim.zero_grad()

            output = model(in_batch)

            loss = criterion(output, label)
            losses.append(loss.item())

            loss.backward()
            optim.step()

            # print loss while training

            if (n_batch + 1) % 200 == 0:
                print("Epoch: [{}/{}], Batch: {}, Loss: {}".format(
                    epoch, num_epochs, n_batch, loss.item()))
        lossess.append(np.mean(np.array(losses)))

    from matplotlib import pyplot as plt
    plt.figure()
    plt.title('LSTM Loss')
    plt.ylabel('Loss')
    plt.xlabel('Num of Epochs')
    plt.plot(lossess)
    plt.show()

    # test trained LSTM model
    l1_err, l2_err = 0, 0
    l1_loss = nn.L1Loss()
    l2_loss = nn.MSELoss()
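    # accumulate L1 (MAE) and L2 (MSE) errors, summed over the test batches below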
    model.eval()
    with torch.no_grad():
        for n_batch, (in_batch, label) in enumerate(test_loader):
            in_batch, label = in_batch.to(device), label.to(device)
            pred = model.test(in_batch)

            l1_err += l1_loss(pred, label).item()
            l2_err += l2_loss(pred, label).item()

    print("Test L1 error:", l1_err)
    print("Test L2 error:", l2_err)

    # visualize the prediction comparing to the ground truth
    if device == 'cpu':
        pred = pred.detach().numpy()[0, :, :]
        label = label.detach().numpy()[0, :, :]
    else:
        pred = pred.detach().cpu().numpy()[0, :, :]
        label = label.detach().cpu().numpy()[0, :, :]

    r = []
    num_points = 17
    interval = 1. / num_points
    x = int(num_points / 2)
    for j in range(-x, x + 1):
        r.append(interval * j)

    from matplotlib import pyplot as plt
    plt.figure()
    for i in range(1, len(pred)):
        c = (i / (num_points + 1), 1 - i / (num_points + 1), 0.5)
        plt.plot(pred[i], r, label='t = %s' % (i), c=c)
    plt.xlabel('velocity [m/s]')
    plt.ylabel('r [m]')
    plt.legend(bbox_to_anchor=(1, 1), fontsize='x-small')
    plt.show()

    plt.figure()
    for i in range(1, len(label)):
        c = (i / (num_points + 1), 1 - i / (num_points + 1), 0.5)
        plt.plot(label[i], r, label='t = %s' % (i), c=c)
    plt.xlabel('velocity [m/s]')
    plt.ylabel('r [m]')
    plt.legend(bbox_to_anchor=(1, 1), fontsize='x-small')
    plt.show()
Example #17
x = init.uniform_(torch.Tensor(num_data, 1), -15,
                  15)  # random num_data x 1 tensor with values between -15 and 15
y = (x**2) + 3  # target labels we want to learn
y_noise = y + noise  # noisy targets the model is trained on

model = nn.Sequential(  # runs the layers below in sequence
    nn.Linear(1, 6),  # linear layer: 1 input, 6 outputs
    nn.ReLU(),  # activation function
    nn.Linear(6, 10),
    nn.ReLU(),
    nn.Linear(10, 6),
    nn.ReLU(),
    nn.Linear(6, 1),  # after the hidden layers the final output must be 1
)

loss_func = nn.L1Loss()  # loss function
optimizer = optim.SGD(model.parameters(), lr=0.002)  # optimization method, learning rate 0.002

loss_array = []  # stores the loss at each step
for i in range(num_epoch):
    optimizer.zero_grad()  # reset gradients
    output = model(x)  # feed x through the model
    loss = loss_func(output, y_noise)  # loss between the model output and y_noise
    loss.backward()  # compute gradients
    optimizer.step()  # update parameters with the current gradients

    loss_array.append(loss.item())  # record the current loss
    if i % 10 == 0:
        print(loss.data)

plt.plot(loss_array)
Example #18
def train():
    """
    Function to train CycleGAN. Various arguments below allow for saving checkpoints, temporary images,
    changing batch size, epochs, cyclic loss weighting, etc.
    """

    # Command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate",
                        type=float,
                        help="Learning rate to use for training",
                        default=.0002)
    parser.add_argument("--show_images",
                        help="shows a generated image after every epoch",
                        action="store_true")
    parser.add_argument(
        "--checkpoint_frequency",
        type=int,
        help=
        "If given, saves a copy of the weights every x epochs, where x is the integer passed in. Default is no checkpoints saved"
    )
    parser.add_argument(
        "--prev_model",
        help=
        "if given, will load in previous saved model from a .tar file. Argument should be path to .tar file to load"
    )
    parser.add_argument("--path_to_A_images",
                        help="path to folder containing A dataset images",
                        default="./data/datasets/cezanne2photo/trainA")
    parser.add_argument("--path_to_B_images",
                        help="path to folder containing B dataset images",
                        default="./data/datasets/cezanne2photo/trainB")
    parser.add_argument("--batch_size",
                        type=int,
                        help="size of batches to train on",
                        default=1)
    parser.add_argument("--num_epochs",
                        type=int,
                        help="number of epochs to train for",
                        default=20)
    parser.add_argument("--lambda_weighting",
                        type=int,
                        help="Weight to apply to cyclic consistency loss",
                        default=10)
    parser.add_argument(
        "--show_progress",
        help=
        "If passed, will store temp images generated from both generators after each training pass",
        action="store_true")
    args = parser.parse_args()

    # Can change this argument to use gpu if available
    device = torch.device("cpu")

    # Creates datasets and dataloaders
    transform = transforms.Compose([transforms.ToTensor()])
    A_dataset = image_processing.CezanneDataset(args.path_to_A_images,
                                                transform)
    B_dataset = image_processing.CezanneDataset(args.path_to_B_images,
                                                transform)
    A_dataloader = utils.data.DataLoader(A_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True)
    B_dataloader = utils.data.DataLoader(B_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True)

    # Creates generators and discriminators (the 3s are the 3 color channels of the input/output images)
    image_gen = model.Generator(3, 3)
    mask_gen = model.Generator(3, 3)
    image_disc = model.Discriminator(3)
    mask_disc = model.Discriminator(3)

    # Move networks onto the selected device
    image_gen.to(device)
    mask_gen.to(device)
    image_disc.to(device)
    mask_disc.to(device)

    cyclic_loss = nn.L1Loss()

    # For more fine-grained training, this could be partitioned into 4 optimizers with different learning rates (see the commented sketch below)
    optimizer = optim.Adam(
        list(image_gen.parameters()) + list(mask_gen.parameters()) +
        list(image_disc.parameters()) + list(mask_disc.parameters()),
        lr=args.learning_rate)
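
    # A possible split into four optimizers with separate learning rates (commented
    # sketch, not used below; the 0.5x discriminator rate is an illustrative assumption):
    # image_gen_opt = optim.Adam(image_gen.parameters(), lr=args.learning_rate)
    # mask_gen_opt = optim.Adam(mask_gen.parameters(), lr=args.learning_rate)
    # image_disc_opt = optim.Adam(image_disc.parameters(), lr=args.learning_rate * 0.5)
    # mask_disc_opt = optim.Adam(mask_disc.parameters(), lr=args.learning_rate * 0.5)
    # Each optimizer would then be stepped after its own backward pass.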

    prev_epoch = 0
    # Loads in previous model if given
    if args.prev_model:
        checkpoint = torch.load(args.prev_model)
        image_gen.load_state_dict(checkpoint['image_gen_model'])
        mask_gen.load_state_dict(checkpoint['mask_gen_model'])
        image_disc.load_state_dict(checkpoint['image_disc_model'])
        mask_disc.load_state_dict(checkpoint['mask_disc_model'])
        optimizer.load_state_dict(checkpoint['optimizer_model'])
        prev_epoch = checkpoint['epoch']

    for epoch in range(prev_epoch, args.num_epochs):
        print("Epoch: ", epoch)
        for i, batch in enumerate(zip(A_dataloader, B_dataloader)):
            # Move inputs onto the selected device
            image, mask = batch[0].to(device), batch[1].to(device)

            # Make predictions
            predicted_image = image_gen(mask)
            predicted_mask = mask_gen(image)
            im_discrim_prob = image_disc(image)
            mask_discrim_prob = mask_disc(mask)
            f_im_discrim_prob = image_disc(predicted_image)
            f_mask_discrim_prob = mask_disc(predicted_mask)
            recov_image = image_gen(predicted_mask)
            recov_mask = mask_gen(predicted_image)
            identity_image = image_gen(image)
            identity_mask = mask_gen(mask)

            # reshape probabilities for loss function
            im_discrim_prob = torch.t(im_discrim_prob)
            mask_discrim_prob = torch.t(mask_discrim_prob)
            f_im_discrim_prob = torch.t(f_im_discrim_prob)
            f_mask_discrim_prob = torch.t(f_mask_discrim_prob)
            # Get generator losses
            optimizer.zero_grad()
            im_to_mask_gen_loss = -torch.mean(
                torch.log(1 - f_im_discrim_prob[0]) +
                torch.log(im_discrim_prob[0]))
            mask_to_im_gen_loss = -torch.mean(
                torch.log(1 - f_mask_discrim_prob[0]) +
                torch.log(mask_discrim_prob[0]))
            # Get cyclic losses
            cyclic_loss_im_to_mask = cyclic_loss(recov_image, image)
            cyclic_loss_mask_to_im = cyclic_loss(recov_mask, mask)
            # Total up gen losses and optimize
            gen_loss = im_to_mask_gen_loss + mask_to_im_gen_loss + \
                args.lambda_weighting * \
                (cyclic_loss_im_to_mask + cyclic_loss_mask_to_im)

            # Get discriminator losses
            im_discrim_loss = torch.mean(
                torch.log(1 - im_discrim_prob[0]) +
                torch.log(f_im_discrim_prob[0]))
            mask_discrim_loss = torch.mean(
                torch.log(1 - mask_discrim_prob[0]) +
                torch.log(f_mask_discrim_prob[0]))
            discrim_loss = im_discrim_loss + mask_discrim_loss

            identity_loss = args.lambda_weighting * \
                (cyclic_loss(identity_image, image) +
                 cyclic_loss(identity_mask, mask))

            total_loss = gen_loss + identity_loss
            total_loss.backward()
            optimizer.step()
            print("gen1_loss:", im_to_mask_gen_loss)
            print("gen2_loss:", mask_to_im_gen_loss)
            print("cyclic_loss_gen1", cyclic_loss_im_to_mask)
            print("cyclic_loss_gen2", cyclic_loss_mask_to_im)
            print("gen_loss", gen_loss)
            print("dis1_loss", im_discrim_loss)
            print("dis2_loss", mask_discrim_loss)
            print("dis_loss", discrim_loss)
            print("identity_loss", identity_loss)
            print("total_loss", total_loss)

            if args.show_progress:
                # A Image
                image = transforms.ToPILImage()(
                    predicted_image.cpu().detach()[0, :, :, :])
                image.save("./Images/temp_A.png")
                # B Image
                image = transforms.ToPILImage()(
                    predicted_mask.cpu().detach()[0, :, :, :])
                image.save("./Images/temp_B.png")

        if args.show_images:
            image = transforms.ToPILImage()(
                predicted_image.cpu().detach()[0, :, :, :])
            image.save("./Images/epoch_" + str(epoch) + ".png")
        # Saves a checkpoint if needed
        if args.checkpoint_frequency and epoch % args.checkpoint_frequency == 0:
            torch.save(
                {
                    'epoch': epoch,
                    'gen_loss': gen_loss,
                    'discrim_loss': discrim_loss,
                    'image_gen_model': image_gen.state_dict(),
                    'mask_gen_model': mask_gen.state_dict(),
                    'image_disc_model': image_disc.state_dict(),
                    'mask_disc_model': mask_disc.state_dict(),
                    'optimizer_model': optimizer.state_dict()
                }, "./checkpoints/epoch_" + str(epoch) + ".tar")
    # Save last model after training
    torch.save(
        {
            'epoch': epoch,
            'gen_loss': gen_loss,
            'discrim_loss': discrim_loss,
            'image_gen_model': image_gen.state_dict(),
            'mask_gen_model': mask_gen.state_dict(),
            'image_disc_model': image_disc.state_dict(),
            'mask_disc_model': mask_disc.state_dict(),
            'optimizer_model': optimizer.state_dict()
        }, "./checkpoints/epoch_" + str(epoch) + ".tar")
Example #19
                        default=2000,
                        type=float,
                        help='lambda value for adjusting balance between generator loss and GAN loss')
    args = parser.parse_args()

    """
    output_source
    1. input data has colored 26 alphabets 64 * (64 * 26) * 3
    2. get certain position of alphabets via alphabet_position function
    3. get output_source by concating source_list which is selected in (2)
       alphabets from input data(1)
    """

    # GENERATOR
    generator = Generator(args.latent_dim)
    generator_loss = nn.L1Loss()

    # ADVERSARIAL
    discriminator = Discriminator()
    discriminator_loss = nn.BCELoss()

    real = torch.ones((args.batch_size, 1), dtype=torch.float32, requires_grad=False)
    fake = torch.zeros((args.batch_size, 1), dtype=torch.float32, requires_grad=False)
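    # real / fake are the BCE targets for the discriminator: 1 for real samples,
    # 0 for generated ones.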

    if args.load:
        print("=> loading checkpoint 'results/{}'".format(args.save_fpath))
        checkpoint = torch.load('results/' + args.save_fpath)
        prefix = 'module.'
        n_clip = len(prefix)
        gen = checkpoint['gen_model']
        adapted_gen = {k[n_clip:]: v for k, v in gen.items() if k.startswith(prefix)}
Example #20
TEMPLATES = None
if MAPTYPE in ['tmp', 'pca_tmp']:
    TEMPLATES = get_templates()

MODEL_NAME = '{0}_{1}'.format(BACKBONE, MAPTYPE)
MODEL_DIR = MODEL_DIR + MODEL_NAME + '/'

MODEL = Model(MAPTYPE, TEMPLATES, 2, False)

OPTIM = optim.Adam(MODEL.model.parameters(),
                   lr=LEARNING_RATE,
                   weight_decay=WEIGHT_DECAY)
MODEL.model.cuda()
LOSS_CSE = nn.CrossEntropyLoss().cuda()
LOSS_L1 = nn.L1Loss().cuda()
MAXPOOL = nn.MaxPool2d(19).cuda()


def calculate_losses(batch):
    img = batch['img']
    msk = batch['msk']
    lab = batch['lab']
    x, mask, vec = MODEL.model(img)
    loss_l1 = LOSS_L1(mask, msk)
    loss_cse = LOSS_CSE(x, lab)
    loss = loss_l1 + loss_cse
    pred = torch.max(x, dim=1)[1]
    acc = (pred == lab).float().mean()
    res = {'lab': lab, 'msk': msk, 'score': x, 'pred': pred, 'mask': mask}
    results = {}
Example #21
def train(model,
          data_loader,
          optimizer,
          init_lr=0.002,
          checkpoint_dir=None,
          checkpoint_interval=None,
          nepochs=None,
          clip_thresh=1.0):
    model.train()
    if use_cuda:
        model = model.cuda()
    linear_dim = model.final_output_dim

    criterion = nn.L1Loss()

    global global_step, global_epoch
    while global_epoch < nepochs:
        running_loss = 0.
        for step, (x, input_lengths, mel, y) in tqdm(enumerate(data_loader)):
            # Decay learning rate
            current_lr = _learning_rate_decay(init_lr, global_step)
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr

            optimizer.zero_grad()

            # Sort by length
            sorted_lengths, indices = torch.sort(input_lengths.view(-1),
                                                 dim=0,
                                                 descending=True)
            sorted_lengths = sorted_lengths.long().numpy()

            x, mel, y = x[indices], mel[indices], y[indices]

            # Feed data
            x, mel, y = Variable(x), Variable(mel), Variable(y)
            if use_cuda:
                x, mel, y = x.cuda(), mel.cuda(), y.cuda()
            mel_outputs, linear_outputs, attn = model(
                x, mel, input_lengths=sorted_lengths)

            # Loss
            mel_loss = criterion(mel_outputs, mel)
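            # n_priority_freq counts the linear-spectrogram bins below 3000 Hz
            # (assuming fs is the sample rate, so fs * 0.5 is the Nyquist frequency);
            # those low-frequency bins get an extra 0.5-weighted L1 term below.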
            n_priority_freq = int(3000 / (fs * 0.5) * linear_dim)
            linear_loss = 0.5 * criterion(linear_outputs, y) \
                + 0.5 * criterion(linear_outputs[:, :, :n_priority_freq],
                                  y[:, :, :n_priority_freq])
            loss = mel_loss + linear_loss

            if global_step > 0 and global_step % checkpoint_interval == 0:
                save_states(global_step, mel_outputs, linear_outputs, attn, y,
                            sorted_lengths, checkpoint_dir)
                save_checkpoint(model, optimizer, global_step, checkpoint_dir,
                                global_epoch)

            # Update
            loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       clip_thresh)
            optimizer.step()

            # Logs
            log_value("loss", float(loss.item()), global_step)
            log_value("mel loss", float(mel_loss.item()), global_step)
            log_value("linear loss", float(linear_loss.item()), global_step)
            log_value("gradient norm", grad_norm, global_step)
            log_value("learning rate", current_lr, global_step)

            global_step += 1
            running_loss += loss.item()

        averaged_loss = running_loss / (len(data_loader))
        log_value("loss (per epoch)", averaged_loss, global_epoch)
        print("Loss: {}".format(running_loss / (len(data_loader))))

        global_epoch += 1
Example #22
def main():
    global opt, name, logger, model, criterion, SSIM_loss, start_time, mcs_num
    opt = parser.parse_args()
    print(opt)

    # Tag_BatchSize
    name = "%s_%d" % (opt.tag, opt.batchSize)

    mcs_num = "%s" % (opt.num_mcs)

    logger = SummaryWriter("runs/" + name)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")

    indoor_test_dataset = TestFolder(opt.test, transform=Compose([ToTensor()]))

    testing_data_loader = DataLoader(dataset=indoor_test_dataset,
                                     num_workers=opt.threads,
                                     batch_size=1,
                                     pin_memory=True,
                                     shuffle=True)

    print("==========> Building model")

    backbone = models.resnet50(pretrained=True)
    model = ResNet50(backbone, num_classes=15)

    # criterion = EMDLoss()
    criterion = nn.L1Loss()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['state_dict'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    else:
        model = model.cpu()
        criterion = criterion.cpu()

    print("==========> Testing")

    start_time = time.time()
    test(testing_data_loader)
Example #23
    np_tensor = np.expand_dims(np_tensor, axis=0)
    if len(np_tensor.shape) == 3:
        np_tensor = np.expand_dims(np_tensor, axis=0)

    torch_tensor = torch.Tensor(np_tensor)
    return torch_tensor.cuda()


opt = TestOptions().parse()

model = Pix2PixModel(opt)
model.eval()

visualizer = Visualizer(opt)

criterionRGBL1 = nn.L1Loss()
criterionRGBL2 = nn.MSELoss()

# read data
data = single_inference_dataLoad(opt)
# forward
generated = model(data, mode='inference')
img_path = data['path']
print('process image... %s' % img_path)

# remove background
if opt.remove_background:
    if generated.shape[2] != data['label_tag'].shape[2]:
        data['label_tag'] = cv2_resize(data['label_tag'], generated.shape[2])
        data['image_tag'] = cv2_resize(data['image_tag'], generated.shape[2])
    generated = generated * data['label_tag'].float() + data['image_tag'] * (
Example #24
def DAGH_algo(code_length, dataname):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    # code_length=8
    '''
    parameter setting
    '''
    max_iter = opt.max_iter
    epochs = opt.epochs
    batch_size = opt.batch_size
    learning_rate = opt.learning_rate
    weight_decay = 5 * 10**-4
    num_anchor = opt.num_anchor
    lambda_1 = float(opt.lambda_1)
    lambda_2 = float(opt.lambda_2)
    lambda_3 = float(opt.lambda_3)

    record['param']['opt'] = opt
    record['param']['description'] = '[Comment: learning rate decay]'
    logger.info(opt)
    logger.info(code_length)
    logger.info(record['param']['description'])
    '''
    dataset preprocessing
    '''
    # nums, dsets, labels = _dataset(dataname)
    nums, dsets, labels = load_dataset(dataname)
    num_database, num_test = nums
    dset_database, dset_test = dsets
    database_labels, test_labels = labels
    '''
    model construction
    '''
    beta = 2
    model = cnn_model.CNNNet(opt.arch, code_length)
    model.cuda()
    cudnn.benchmark = True
    DAGH_loss = dl.DAGHLoss(lambda_1, lambda_2, lambda_3, code_length)
    L1_criterion = nn.L1Loss()
    L2_criterion = nn.MSELoss()
    # optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate,
                          weight_decay=weight_decay,
                          momentum=0.9)  ####
    # optimizer = optim.RMSprop (model.parameters (), lr=learning_rate, weight_decay=weight_decay)
    # optimizer = optim.RMSprop (model.parameters (), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)
    # optimizer = optim.Adadelta (model.parameters (), weight_decay=weight_decay)
    # optimizer = optim.Adam (model.parameters ())

    B = np.sign(np.random.randn(code_length, num_database))
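    # B holds the binary codes: one code_length-bit column per database item,
    # initialized with random values in {-1, +1}.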

    model.train()
    for iter in range(max_iter):
        iter_time = time.time()

        trainloader = DataLoader(dset_database,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
        F = np.zeros((num_database, code_length), dtype=np.float64)

        if iter == 0:
            '''
            initialize the feature of all images to build dist graph
            '''
            ini_Features = np.zeros((num_database, 4096), dtype=np.float64)
            ini_F = np.zeros((num_database, code_length), dtype=np.float64)
            for iteration, (train_input, train_label,
                            batch_ind) in enumerate(trainloader):
                train_input = Variable(train_input.cuda())
                output = model(train_input)
                ini_Features[batch_ind, :] = output[0].cpu().data.numpy()
                ini_F[batch_ind, :] = output[1].cpu().data.numpy()
            print('initialization dist graph forward done!')
            dist_graph = get_dist_graph(ini_Features, num_anchor)
            # dist_graph = np.random.rand(num_database,num_anchor)
            # bf = np.sign(ini_F)
            Z = calc_Z(dist_graph)
        elif (iter % 3) == 0:
            dist_graph = get_dist_graph(Features, num_anchor)
            Z = calc_Z(dist_graph)
            print('calculate dist graph forward done!')

        inv_A = inv(np.diag(Z.sum(0)))  # m X m
        Z_T = Z.transpose()  # m X n
        left = np.dot(B, np.dot(Z, inv_A))  # k X m
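        # Z is the n x m anchor-affinity matrix (n database items, m anchors),
        # inv_A inverts the anchor degree matrix diag(Z.sum(0)), and
        # left = B * Z * inv_A (k x m) is reused by get_batch_gard for the
        # per-batch gradients of the graph term below.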

        if iter == 0:
            loss_ini = calc_loss(B, ini_F, Z, inv_A, lambda_1, lambda_2,
                                 lambda_3, code_length)
            # loss_ini2 = calc_all_loss(B,F,Z,inv_A,Z1,Z2,Y1,Y2,rho1,rho2,lambda_1,lambda_2)
            print(loss_ini)
        '''
        learning deep neural network: feature learning
        '''
        Features = np.zeros((num_database, 4096), dtype=np.float64)
        for epoch in range(epochs):
            for iteration, (train_input, train_label,
                            batch_ind) in enumerate(trainloader):
                train_input = Variable(train_input.cuda())

                output = model(train_input)
                # Features[batch_ind, :] = output[0].cpu ().data.numpy ()
                # F[batch_ind, :] = output[1].cpu ().data.numpy ()

                batch_grad = get_batch_gard(
                    B, left, Z_T, batch_ind) / (code_length * batch_size)
                batch_grad = Variable(
                    torch.from_numpy(batch_grad).type(
                        torch.FloatTensor).cuda())
                optimizer.zero_grad()
                # output[1].backward(batch_grad, retain_graph=True)
                output[1].backward(batch_grad)

                B_cuda = Variable(
                    torch.from_numpy(B[:, batch_ind]).type(
                        torch.FloatTensor).cuda())
                # optimizer.zero_grad ()
                other_loss = DAGH_loss(output[1].t(), B_cuda)
                one_vectors = Variable(torch.ones(output[1].size()).cuda())
                L1_loss = L1_criterion(torch.abs(output[1]), one_vectors)
                # L2_loss = L2_criterion (output[1],B_cuda.t())
                All_loss = other_loss + lambda_3 * L1_loss / code_length
                All_loss.backward()

                optimizer.step()

                if (iteration % 200) == 0:
                    print('iteration:' + str(iteration))
                    #print (model.features[0].weight.data[1, 1, :, :])
                    #print (model.features[18].weight.data[1, 1, :, :])
                    #print (model.classifier[6].weight.data[:, 1])
        adjusting_learning_rate(optimizer, iter)

        trainloader2 = DataLoader(dset_database,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=4)
        F = get_F(model, trainloader2, num_database, code_length)
        Features = get_fearture(model, trainloader2, num_database, code_length)
        '''
        learning binary codes: discrete coding
        '''
        # bf = np.sign (F)

        # F = np.random.randn (num_database, 12)
        loss_before = calc_loss(B, F, Z, inv_A, lambda_1, lambda_2, lambda_3,
                                code_length)

        B = B_step(F, Z, inv_A)
        iter_time = time.time() - iter_time
        loss_ = calc_loss(B, F, Z, inv_A, lambda_1, lambda_2, lambda_3,
                          code_length)

        logger.info(
            '[Iteration: %3d/%3d][Train Loss: before:%.4f, after:%.4f]', iter,
            max_iter, loss_before, loss_)
        record['train loss'].append(loss_)
        record['iter time'].append(iter_time)
    '''
    training procedure finishes, evaluation
    '''
    model.eval()
    testloader = DataLoader(dset_test,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=4)
    qB = encode(model, testloader, num_test, code_length)
    rB = B.transpose()

    topKs = np.arange(1, 500, 50)
    top_ndcg = 100
    map = calc_hr.calc_map(qB, rB, test_labels.numpy(),
                           database_labels.numpy())
    top_map = calc_hr.calc_topMap(qB, rB, test_labels.numpy(),
                                  database_labels.numpy(), 2000)
    Pres = calc_hr.calc_topk_pres(qB, rB, test_labels.numpy(),
                                  database_labels.numpy(), topKs)
    ndcg = calc_hr.cal_ndcg_k(qB, rB, test_labels.numpy(),
                              database_labels.numpy(), top_ndcg)

    logger.info('[lambda_1: %.4f]', lambda_1)
    logger.info('[lambda_2: %.4f]', lambda_2)
    logger.info('[lambda_3: %.4f]', lambda_3)
    logger.info('[Evaluation: mAP: %.4f]', map)
    logger.info('[Evaluation: topK_mAP: %.4f]', top_map)
    logger.info('[Evaluation: Pres: %.4f]', Pres[0])
    logger.info('[Evaluation: topK_ndcg: %.4f]', ndcg)
    record['rB'] = rB
    record['qB'] = qB
    record['map'] = map
    record['topK_map'] = top_map
    record['topK_ndcg'] = ndcg
    record['Pres'] = Pres
    record['F'] = F
    filename = os.path.join(logdir, str(code_length) + 'bits-record.pkl')

    _save_record(record, filename)
    return top_map
Example #25
def train(train_loader, model, optimizer, epoch):
    criterion = nn.L1Loss()
    batch_time = AverageMeter()
    losses = AverageMeter()

    model.train()

    cos = nn.CosineSimilarity(dim=1, eps=0)
    if use_cuda:
        get_gradient = sobel.Sobel().cuda()
    else:
        get_gradient = sobel.Sobel()  #.cuda()

    end = time.time()
    for i, sample_batched in enumerate(train_loader):
        image, depth = sample_batched['image'], sample_batched['depth']

        #depth = depth.cuda(async=True)
        if use_cuda:
            depth = depth.cuda()
            image = image.cuda()
        else:
            image = torch.autograd.Variable(image)
            depth = torch.autograd.Variable(depth)

        ones = torch.ones(depth.size(0), 1, depth.size(2),
                          depth.size(3)).float()
        if use_cuda:
            ones = ones.cuda()
        ones = torch.autograd.Variable(ones)
        optimizer.zero_grad()

        output = model(image)
        #output = torch.nn.functional.upsample(output, size=[depth.size(2),depth.size(3)], mode='bilinear')
        output = torch.nn.functional.interpolate(
            output,
            size=[depth.size(2), depth.size(3)],
            mode='bilinear',
            align_corners=False)

        depth_grad = get_gradient(depth)
        output_grad = get_gradient(output)
        depth_grad_dx = depth_grad[:, 0, :, :].contiguous().view_as(depth)
        depth_grad_dy = depth_grad[:, 1, :, :].contiguous().view_as(depth)
        output_grad_dx = output_grad[:, 0, :, :].contiguous().view_as(depth)
        output_grad_dy = output_grad[:, 1, :, :].contiguous().view_as(depth)

        depth_normal = torch.cat((-depth_grad_dx, -depth_grad_dy, ones), 1)
        output_normal = torch.cat((-output_grad_dx, -output_grad_dy, ones), 1)
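        # (-dz/dx, -dz/dy, 1) approximates the (unnormalized) surface normal of a depth
        # map, so the cosine term below penalizes differences in surface orientation.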

        # depth_normal = F.normalize(depth_normal, p=2, dim=1)
        # output_normal = F.normalize(output_normal, p=2, dim=1)

        loss_depth = torch.log(torch.abs(output - depth) + 0.5).mean()
        loss_dx = torch.log(torch.abs(output_grad_dx - depth_grad_dx) +
                            0.5).mean()
        loss_dy = torch.log(torch.abs(output_grad_dy - depth_grad_dy) +
                            0.5).mean()
        loss_normal = torch.abs(1 - cos(output_normal, depth_normal)).mean()

        # TODO: grad_dx, grad_dy being negative: is it ok or is something wrong here?
        #print("losses:",loss_depth, loss_dx, loss_dy, loss_normal)
        loss = loss_depth + loss_normal + (loss_dx + loss_dy)

        #losses.update(loss.data[0], image.size(0))
        losses.update(loss.data.item(), image.size(0))
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        batchSize = depth.size(0)

        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                  epoch,
                  i,
                  len(train_loader),
                  batch_time=batch_time,
                  loss=losses))
def train(data_train, data_test):
    """
    See this script for more information
    https://github.com/MorvanZhou/PyTorch-Tutorial/blob/master/tutorial-contents/403_RNN_regressor.py
    :return:
    """
    data = data_train
    # # xxx = [item for xx in data for item in xx]
    # xxx = []
    # for xx in data:
    #     xxx.extend(xx.flatten())

    checkpoint_and_write_save_dir = logdir()

    os.system("mkdir -p checkpoints")
    os.system("mkdir -p checkpoints/{}".format(checkpoint_and_write_save_dir))

    writer = SummaryWriter(os.path.join("runs", checkpoint_and_write_save_dir),
                           comment="FreqWarp")

    logging.info("Building architecture...")

    if use_cuda:
        net = Net(20, 20, 20, nb_lstm_layers, batch_size).cuda()
    else:
        net = Net(20, 20, 20, nb_lstm_layers, batch_size)
    net.train()

    # optimizer = optim.SGD(net.parameters(), lr=0.001)
    # optimizer = optim.Adam(net.parameters(), lr=0.005, weight_decay=0.0001)
    optimizer = optim.RMSprop(net.parameters(), lr=0.005, weight_decay=0.0001)

    # criterion = nn.MSELoss()
    criterion = nn.L1Loss(size_average=False)
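    # size_average=False sums the absolute errors instead of averaging them
    # (the equivalent of reduction='sum' in newer PyTorch versions).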

    logging.info("Reading data ...")

    best_avg_loss = 1000000
    best_avg_loss_at_epoch = 0

    logging.info("START TRAINING ... MAX EPOCH: " + str(nb_epoch))
    for epoch in range(nb_epoch):
        print(
            "===================================================================="
        )
        count = 0
        loss_sum = 0

        for i in range(len(data)):
            if use_cuda:
                temp_x = torch.tensor(data[i][0]).cuda()
                temp_y = torch.tensor(data[i][1]).cuda()
            else:
                temp_x = torch.tensor(data[i][0])
                temp_y = torch.tensor(data[i][1])

            # exit()
            # for ii in range(0, data[i][0].shape[0] - nb_frame_in_batch*2 + 1):
            optimizer.zero_grad()

            h_state = net.hidden_init(
                temp_x
            )  # New added Dec 07: They say hidden state need to be clear before each step

            # prediction, h_state = net(batch_x.float(), h_state)
            prediction, h_state = net(temp_x.float(), h_state)
            # prediction = net(batch_x.unsqueeze(0).float(), None)

            loss = criterion(prediction.float(),
                             temp_y.float().view(len(temp_y), batch_size, -1))

            # h_state = (h_state[0].detach(), h_state[1].detach())

            loss.backward()
            optimizer.step()

            loss_sum += loss.item()  # accumulate as a float so the graph is not retained
            count += 1

        else:  # for-else: runs validation after the loop over the training data finishes each epoch
            with torch.no_grad():
                losses = []
                for i in range(len(data_test)):
                    if use_cuda:
                        temp_x = torch.tensor(data_test[i][0]).cuda()
                        temp_y = torch.tensor(data_test[i][1]).cuda()
                    else:
                        temp_x = torch.tensor(data_test[i][0])
                        temp_y = torch.tensor(data_test[i][1])

                    h_state = net.hidden_init(temp_x)
                    prediction, h_state = net(temp_x.float(), h_state)
                    loss = criterion(
                        prediction.float(),
                        temp_y.float().view(len(temp_y), batch_size, -1))

                    losses.append(loss.data.item())
            logging.info(describe(losses))

            writer.add_scalar("loss/minibatch",
                              loss_sum / count,
                              global_step=epoch)
            # writer.add_graph(net, (temp_x.float(), h_state), verbose=True)

        # for m_index, m in enumerate(net.parameters()):
        #     print(m_index)
        #     print(net_modules[m_index])
        #     writer.add_histogram('histogram/', m.data, global_step=epoch)
        for name, param in net.named_parameters():
            writer.add_histogram('histogram/' + name,
                                 param.data,
                                 global_step=epoch)

        avg_loss = loss_sum / count
        if avg_loss < best_avg_loss:
            state = {'epoch': epoch, 'state_dict': net, 'optimizer': optimizer}

            save_checkpoint(checkpoint_and_write_save_dir + "/" +
                            MODEL_PTH_NAME + "_epoch" + str(epoch) + "_" +
                            str(round(float(avg_loss), 3)),
                            model=net,
                            state=state)

            logging.info(
                "Epoch {}: average loss = {:.3f}, improve {:.3f} from {:.3f}. Model saved at checkpoints/{}/{}.pth"
                .format(
                    epoch, avg_loss, best_avg_loss - avg_loss, best_avg_loss,
                    checkpoint_and_write_save_dir, MODEL_PTH_NAME + "_epoch" +
                    str(epoch) + "_" + str(round(float(avg_loss), 3))))

            best_avg_loss = avg_loss
            best_avg_loss_at_epoch = epoch

        elif epoch - best_avg_loss_at_epoch > patience:
            logging.info(
                "Model hasn't improved since epoch {}. Stop training ...".
                format(best_avg_loss_at_epoch))
            break
        else:
            logging.info(
                "Epoch {}: average loss = {:.3f}. No improvement since epoch {}"
                .format(epoch, avg_loss, best_avg_loss_at_epoch))

    writer.close()

    return net
Example #27
    def __init__(self, opt):
        super(SRCosRaGANModel, self).__init__(opt)
        train_opt = opt['train']

        # define networks and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)  # G
        if self.is_train:
            self.netD = networks.define_D(opt).to(self.device)  # D
            self.netG.train()
            self.netD.train()
        self.load()  # load G and D if needed

        # define losses, optimizer and scheduler
        if self.is_train:
            # G pixel loss
            if train_opt['pixel_weight'] > 0:
                l_pix_type = train_opt['pixel_criterion']
                if l_pix_type == 'l1':
                    self.cri_pix = nn.L1Loss().to(self.device)
                elif l_pix_type == 'l2':
                    self.cri_pix = nn.MSELoss().to(self.device)
                else:
                    raise NotImplementedError(
                        'Loss type [{:s}] not recognized.'.format(l_pix_type))
                self.l_pix_w = train_opt['pixel_weight']
            else:
                logger.info('Remove pixel loss.')
                self.cri_pix = None

            # G feature loss
            if train_opt['feature_weight'] > 0:
                l_fea_type = train_opt['feature_criterion']
                if l_fea_type == 'l1':
                    self.cri_fea = nn.L1Loss().to(self.device)
                elif l_fea_type == 'l2':
                    self.cri_fea = nn.MSELoss().to(self.device)
                elif l_fea_type == 'cos':
                    self.cri_fea = nn.CosineEmbeddingLoss().to(self.device)
                else:
                    raise NotImplementedError(
                        'Loss type [{:s}] not recognized.'.format(l_fea_type))
                self.l_fea_w = train_opt['feature_weight']
            else:
                logger.info('Remove feature loss.')
                self.cri_fea = None
            if self.cri_fea:  # load VGG perceptual loss
                self.netF = networks.define_F(opt,
                                              use_bn=False).to(self.device)

            # GD gan loss
            self.cri_gan = GANLoss(train_opt['gan_type'], 1.0,
                                   0.0).to(self.device)
            self.l_gan_w = train_opt['gan_weight']
            # D_update_ratio and D_init_iters are for WGAN
            self.D_update_ratio = train_opt['D_update_ratio'] if train_opt[
                'D_update_ratio'] else 1
            self.D_init_iters = train_opt['D_init_iters'] if train_opt[
                'D_init_iters'] else 0

            if train_opt['gan_type'] == 'wgan-gp':
                self.random_pt = torch.Tensor(1, 1, 1, 1).to(self.device)
                # gradient penalty loss
                self.cri_gp = GradientPenaltyLoss(device=self.device).to(
                    self.device)
                self.l_gp_w = train_opt['gp_weigth']

            # optimizers
            # G
            wd_G = train_opt['weight_decay_G'] if train_opt[
                'weight_decay_G'] else 0
            optim_params = []
            for k, v in self.netG.named_parameters(
            ):  # can optimize for a part of the model
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    logger.warning(
                        'Params [{:s}] will not optimize.'.format(k))
            # self.optimizer_G = torch.optim.SGD(optim_params, lr=train_opt['lr_G'])
            self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'], \
                weight_decay=wd_G, betas=(train_opt['beta1_G'], 0.999))
            self.optimizers.append(self.optimizer_G)
            # D
            wd_D = train_opt['weight_decay_D'] if train_opt[
                'weight_decay_D'] else 0
            # self.optimizer_D = torch.optim.SGD(self.netD.parameters(), lr=train_opt['lr_D'])
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=train_opt['lr_D'], \
                weight_decay=wd_D, betas=(train_opt['beta1_D'], 0.999))
            self.optimizers.append(self.optimizer_D)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError(
                    'Only the MultiStepLR learning rate scheme is supported.')

            self.log_dict = OrderedDict()
        # print network
        self.print_network()
Example #28
def main():
    global opt, name, logger, model, criterion_L1, criterion_mse, best_psnr, loss_network
    global edge_loss
    opt = parser.parse_args()
    print(opt)
    import random

    opt.best_psnr = 0
    # Tag_ResidualBlocks_BatchSize
    name = "%s_%d" % (opt.tag, opt.batchSize)
    logger = SummaryWriter("runs/" + name)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    opt.seed_python = random.randint(1, 10000)
    random.seed(opt.seed_python)
    print("Random Seed_python: ", opt.seed_python)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)
    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_data_dir = opt.train
    val_data_dir = opt.test
    # --- Load training data and validation/test data --- #
    training_data_loader = DataLoader(TrainData([240, 240], train_data_dir),
                                      batch_size=opt.batchSize,
                                      shuffle=True,
                                      num_workers=12)
    indoor_test_loader = DataLoader(ValData(val_data_dir),
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=12)

    print("==========> Building model")
    model = DehazeNet()
    criterion_mse = nn.MSELoss(size_average=True)
    criterion_L1 = nn.L1Loss(size_average=True)

    print(model)
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # --- Set the GPU --- #
    print("==========> Setting GPU")
    if cuda:
        model = nn.DataParallel(model,
                                device_ids=[i
                                            for i in range(opt.gpus)]).cuda()
        criterion_L1 = criterion_L1.cuda()
        criterion_mse = criterion_mse.cuda()


    # --- Calculate all trainable parameters in network --- #
    pytorch_total_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    print("Total_params: {}".format(pytorch_total_params))
    # Set up the optimizer
    print("==========> Setting Optimizer")
    # --- Build optimizer --- #
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    # Start training
    print("==========> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        adjust_learning_rate_second(optimizer, epoch - 1)
        train(training_data_loader, indoor_test_loader, optimizer, epoch)
        save_checkpoint(model, epoch, name)
        test(indoor_test_loader, epoch)
Example #29
                                            shuffle=True,
                                            num_workers=4)
b_test_loader = torch.utils.data.DataLoader(b_test_data,
                                            batch_size=3,
                                            shuffle=True,
                                            num_workers=4)

a_fake_pool = utils.ItemPool()
b_fake_pool = utils.ItemPool()
""" model """
Da = models.Discriminator()
Db = models.Discriminator()
Ga = models.Generator()
Gb = models.Generator()
MSE = nn.MSELoss()
L1 = nn.L1Loss()
utils.cuda([Da, Db, Ga, Gb])

da_optimizer = torch.optim.Adam(Da.parameters(), lr=lr, betas=(0.5, 0.999))
db_optimizer = torch.optim.Adam(Db.parameters(), lr=lr, betas=(0.5, 0.999))
ga_optimizer = torch.optim.Adam(Ga.parameters(), lr=lr, betas=(0.5, 0.999))
gb_optimizer = torch.optim.Adam(Gb.parameters(), lr=lr, betas=(0.5, 0.999))
""" load checkpoint """
ckpt_dir = './checkpoints/horse2zebra'
utils.mkdir(ckpt_dir)
try:
    ckpt = utils.load_checkpoint(ckpt_dir)
    start_epoch = ckpt['epoch']
    Da.load_state_dict(ckpt['Da'])
    Db.load_state_dict(ckpt['Db'])
    Ga.load_state_dict(ckpt['Ga'])
Example #30
    def __init__(self):
        self.loss = nn.L1Loss()