Example 1
def main():

    # Training settings
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    device = torch.device('cuda' if use_cuda else 'cpu')

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Visualize the loss value during training
    visual = Visualization() if args.visualization else None

    # Define dataset
    train_dataset = CustomDataset('./train.txt')
    eval_dataset = CustomDataset('./eval.txt')

    # Define dataloader
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                              batch_size=args.eval_batch_size,
                                              shuffle=False,
                                              **kwargs)

    # Define neural network model
    model = Net().to(device)

    # Set optimizer to compute gradients
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    # Scheduler that decays the learning rate each epoch
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    # Train neural network
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        eval_loss = evaluate(args, model, device, eval_loader)
        scheduler.step()

        print("R2: ", calculate_R(args, model, device, eval_loader))

        # Update image
        if args.visualization:
            visual.data_update(epoch, eval_loss)
            visual.render()

    # Save model
    if args.save_model:
        torch.save(model.state_dict(), "model_record.pt")

    # Keep showing the plot after training
    if args.visualization:
        visual.terminate()
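Example 1 does not show the `CustomDataset` it depends on. A minimal, hypothetical sketch of a line-delimited dataset that would work with the loop above (the actual field layout of `train.txt` is not given in the example; since Example 1 reports an R2 score, a regression target is assumed):

import torch
from torch.utils.data import Dataset

class CustomDataset(Dataset):
    # Hypothetical layout: each line holds comma-separated features followed by a target.
    def __init__(self, path):
        with open(path) as f:
            self.rows = [list(map(float, line.split(',')))
                         for line in f if line.strip()]

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, idx):
        *features, target = self.rows[idx]
        return torch.tensor(features), torch.tensor(target)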
Example 2
    def extract_feature(self, model_name, num_classes):
        model = resnet26(num_classes)
        # checkpoint = torch.load(model_name)
        # model.load_state_dict(checkpoint)
        path = '/home/students/student3_15/00_astar/00_baseline/spottune/cv/drkaggle/drkaggle.pkl'
        model.load_state_dict(torch.load(path))

        # Load data
        data = {
            'train': CustomDataset(split="train", seed=42, step='isolation'),
            'test': CustomDataset(split="test", seed=42, step='isolation')
        }
        dataloaders = {
            'train': DataLoader(data['train'], batch_size=20, shuffle=True),
            'test': DataLoader(data['test'], batch_size=20, shuffle=False)
        }

        device = torch.device("cuda:" +
                              self.gpu if torch.cuda.is_available() else "cpu")
        model.to(device)
        model.eval()
        feature_list = []
        label_list = []
        train = []
        test = []
        with torch.no_grad():
            for split in ['train', 'test']:
                for i, (inputs, labels) in enumerate(dataloaders[split]):

                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # Move model outputs and labels to CPU NumPy arrays so that
                    # np.concatenate / np.save below work even when `device` is CUDA.
                    x_feature_batchs = model(inputs).cpu().numpy()
                    labels_np = labels.cpu().numpy()

                    if i:
                        features = np.concatenate([features, x_feature_batchs],
                                                  axis=0)
                        label = np.concatenate([label, labels_np])
                    else:
                        features = x_feature_batchs
                        label = labels_np
                feature_list.append(features)
                label_list.append(label)

        np.save("feature1.npy", feature_list[0])
        np.save("feature2.npy", feature_list[1])
        np.save("label1.npy", label_list[0])
        np.save('label2.npy', label_list[1])
        return feature_list[0], feature_list[1]
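The `if i:` branch above grows the feature array with `np.concatenate` on every batch, which copies all previously collected features each time. An equivalent, simpler pattern (a sketch only; it reuses `model`, `dataloaders`, and `device` from the example) collects per-batch outputs in a list and concatenates once per split:

import numpy as np
import torch

feature_list, label_list = [], []
with torch.no_grad():
    for split in ['train', 'test']:
        batch_features, batch_labels = [], []
        for inputs, labels in dataloaders[split]:
            outputs = model(inputs.to(device))
            batch_features.append(outputs.cpu().numpy())
            batch_labels.append(labels.numpy())
        feature_list.append(np.concatenate(batch_features, axis=0))
        label_list.append(np.concatenate(batch_labels, axis=0))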
Example 3
File: main.py Project: llllxt/fyp
def train(model_name, num_classes, feature_extract, num_epochs, batchsize,
          save_name, gpu):
    model_ft = initialize_model(model_name,
                                num_classes,
                                feature_extract,
                                use_pretrained=True)
    print(model_ft)

    print("initializing datasets and dataloaders")
    data = {
        'train': CustomDataset(split="train", seed=0, step='finetune'),
        'test': CustomDataset(split="test", seed=0, step='finetune')
    }
    dataloaders = {
        'train': DataLoader(data['train'], batch_size=batchsize, shuffle=True),
        'test': DataLoader(data['test'], batch_size=batchsize, shuffle=False)
    }

    device = torch.device("cuda:" +
                          gpu if torch.cuda.is_available() else "cpu")

    model_ft = model_ft.to(device)
    params_to_update = model_ft.parameters()
    print("Params to learn:")
    if feature_extract:
        params_to_update = []
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)
    else:
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                print("\t", name)
    optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)

    # Train and evaluate
    model_ft = train_model(model_ft,
                           optimizer_ft,
                           dataloaders,
                           device,
                           save_name,
                           num_epochs=num_epochs)
    test_model(model_ft, dataloaders, gpu)
    return model_ft
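`initialize_model` is not shown in Example 3. The `feature_extract` flag follows the common torchvision fine-tuning pattern: freeze the pretrained backbone so only the newly created head keeps `requires_grad=True`, which is what the parameter-collection loop above relies on. A hedged sketch of that pattern (the names below are illustrative, not the project's actual helper):

import torch.nn as nn
from torchvision import models

def initialize_model_sketch(num_classes, feature_extract, use_pretrained=True):
    model = models.resnet18(pretrained=use_pretrained)
    if feature_extract:
        # Freeze every pretrained parameter; only the new head will be trained.
        for param in model.parameters():
            param.requires_grad = False
    # Replacing the head creates fresh parameters with requires_grad=True.
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    return model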
Example 4
def main(input_file, output_file, segmentation_model,
         seg_model_name, transform_model_name, frame_skip, batch_size,
         max_frames):
    """
    Create a GIF with lines segmented from a video file.
    """
    assert output_file.endswith('.gif'), 'Make sure output_file is a .gif'

    print('Loading models..')
    num_classes = 4
    input_channels = 3
    model_seg = get_seg_model(seg_model_name, num_classes, input_channels).to(device)
    model_seg.load(segmentation_model)

    print('Loading data..')
    cap = cv2.VideoCapture(input_file)
    images = torch.tensor(get_frames(cap, frame_skip, max_frames)).to(torch.uint8)
    num_images = images.shape[0]
    data_iterator = DataLoader(
        dataset=CustomDataset(images),
        batch_size=batch_size,
        shuffle=False,
        num_workers=2,
        drop_last=False
    )
    print('\tNumber of frames to convert:\t{} (frame skip: {})'.format(num_images, frame_skip))

    print('Converting..')
    model_seg.eval()
    batch_count = 0
    gif_frames = []
    with torch.no_grad():
        start = timer()
        for images in data_iterator:
            batch_count += 1

            # get segmentation
            seg_logits = model_seg(images.to(device))
            seg_preds = torch.argmax(seg_logits, dim=1).cpu()

            source = torch.mul(images.cpu(), 255).to(torch.uint8)
            segmented = logit_to_img(seg_preds.cpu().numpy()).transpose(0, 3, 1, 2)
            segmented = torch.mul(torch.tensor(segmented), 255).to(torch.uint8)

            # convert torch predictions to frames of grid
            gif_frames.extend(convert_batch_to_frames(source, segmented))

            if batch_count % 50 == 0:
                print('\tframe {} / {} - {:.2f} secs'.format(
                    batch_count*batch_size, num_images, timer() - start)
                )
                start = timer()

        del images, source, segmented
        # convert sequence of frames into gif
        print('Saving {}..'.format(output_file))
        imageio.mimsave(output_file, gif_frames, fps=29.97/frame_skip, subrectangles=True)
Example 5
data_transform = transforms.Compose([
    transforms.Resize(model_dimension),
    transforms.CenterCrop(center_crop),
    transforms.ToTensor(),
    normalize,
])
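# NOTE (assumption): `model_dimension`, `center_crop`, and `normalize` are defined
# earlier in the original script and are not part of this snippet. For an
# ImageNet-pretrained classifier they would typically look like:
#   model_dimension = 256
#   center_crop = 224
#   normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                                    std=[0.229, 0.224, 0.225])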

print('===> Loading datasets')

if opt.mode == 'train':
    # train_set = torchvision.datasets.ImageFolder(root = opt.imagenetTrain, transform = data_transform)
    train_set = ImageFolder(root='ILSVRC/train', transform=data_transform)  # ChangedHere
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

# test_set = torchvision.datasets.ImageFolder(root = opt.imagenetVal, transform = data_transform)
test_set = CustomDataset(subset='valid', root_dir='ILSVRC', transform=data_transform)  # ChangedHere
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=True)

print("Dataset Validation Done")
if opt.foolmodel == 'incv3':
    pretrained_clf = torchvision.models.inception_v3(pretrained=True)
elif opt.foolmodel == 'vgg16':
    pretrained_clf = torchvision.models.vgg16(pretrained=True)
elif opt.foolmodel == 'vgg19':
    pretrained_clf = torchvision.models.vgg19(pretrained=True)

# device = torch.device((gpulist[0])) # Might need to Fix for Multi GPU training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

pretrained_clf = pretrained_clf.to(device)
Example 6
                    help='number of epochs between saving models')
parser.add_argument(
    '--savemaxsize',
    action='store_true',
    help='save sample images at max resolution instead of real resolution')
parser.add_argument('--verbose',
                    action='store_true',
                    help='show training progression')

opt = parser.parse_args()
print(opt)

DEVICE = torch.device('cuda:0')
MAX_RES = 7  # 8 for 1024x1024 output

dataset = CustomDataset()

# creating output folders
if not os.path.exists(opt.outd):
    os.makedirs(opt.outd)
for f in [opt.outf, opt.outl, opt.outm]:
    if not os.path.exists(os.path.join(opt.outd, f)):
        os.makedirs(os.path.join(opt.outd, f))

# Model creation and init
G = torch.nn.DataParallel(Generator(max_res=MAX_RES,
                                    nch=opt.nch,
                                    nc=1,
                                    bn=opt.BN,
                                    ws=opt.WS,
                                    pn=opt.PN,
Example 7
    def extract_feature(self):
        # model = resnet26()
        # file_path = "resnet26_pretrained.t7"
        # checkpoint = torch.load(file_path)
        # model = checkpoint['net']
        # for name, module in model._modules.items():
        #     self.recursion_change_bn(model)
        net = resnet26()
        checkpoint = torch.load("resnet26_pretrained.t7")
        net_old = checkpoint['net']

        store_data = []
        t = 0
        for name, m in net_old.named_modules():
            if isinstance(m, nn.Conv2d):
                store_data.append(m.weight.data)
                t += 1

        element = 0
        for name, m in net.named_modules():
            if isinstance(m, nn.Conv2d) and 'parallel_blocks' not in name:
                m.weight.data = torch.nn.Parameter(store_data[element].clone())
                element += 1

        element = 1
        for name, m in net.named_modules():
            if isinstance(m, nn.Conv2d) and 'parallel_blocks' in name:
                m.weight.data = torch.nn.Parameter(store_data[element].clone())
                element += 1

        store_data = []
        store_data_bias = []
        store_data_rm = []
        store_data_rv = []
        for name, m in net_old.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                store_data.append(m.weight.data)
                store_data_bias.append(m.bias.data)
                store_data_rm.append(m.running_mean)
                store_data_rv.append(m.running_var)

        element = 0
        for name, m in net.named_modules():
            if isinstance(m, nn.BatchNorm2d) and 'parallel_block' not in name:
                m.weight.data = torch.nn.Parameter(store_data[element].clone())
                m.bias.data = torch.nn.Parameter(
                    store_data_bias[element].clone())
                m.running_var = store_data_rv[element].clone()
                m.running_mean = store_data_rm[element].clone()
                element += 1

        element = 1
        for name, m in net.named_modules():
            if isinstance(m, nn.BatchNorm2d) and 'parallel_block' in name:
                m.weight.data = torch.nn.Parameter(store_data[element].clone())
                m.bias.data = torch.nn.Parameter(
                    store_data_bias[element].clone())
                m.running_var = store_data_rv[element].clone()
                m.running_mean = store_data_rm[element].clone()
                element += 1
        model = net
        print(model)
        model.cuda()
        model.eval()
        dict1 = {}
        for name, param in model.named_parameters():
            dict1[name] = param
        print("###############dict1$#############")
        print(dict1)
        data = {
            'train': CustomDataset(split="train", seed=42),
            'test': CustomDataset(split="test", seed=42)
        }
        dataloaders = {
            'train': DataLoader(data['train'], batch_size=20, shuffle=True),
            'test': DataLoader(data['test'], batch_size=20, shuffle=False)
        }

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        feature_list = []
        with torch.no_grad():
            for split in ['train', 'test']:
                for i, (inputs, labels) in enumerate(dataloaders[split]):
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    # Move model outputs to CPU NumPy so that np.concatenate /
                    # np.save below work when the model runs on CUDA.
                    x_feature_batchs = model(inputs).cpu().numpy()

                    if i:
                        features = np.concatenate([features, x_feature_batchs],
                                                  axis=0)
                    else:
                        features = x_feature_batchs
                feature_list.append(features)

        np.save("feature1.npy", feature_list[0])
        np.save("feature2.npy", feature_list[1])
        return feature_list[0], feature_list[1]
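When the source and target networks share parameter names, the per-module copying above can be shortened to a filtered state_dict load. A sketch under that assumption, reusing `net` and `net_old` from the example (it would leave the `parallel_blocks` layers, whose names have no counterpart in the old checkpoint, at their initialization rather than seeding them from the shared weights as Example 7 does):

# Copy every checkpoint entry whose name and shape match the new network.
pretrained_state = net_old.state_dict()
own_state = net.state_dict()
compatible = {k: v for k, v in pretrained_state.items()
              if k in own_state and v.shape == own_state[k].shape}
own_state.update(compatible)
net.load_state_dict(own_state)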
Example 8
def main():
    parser = argparse.ArgumentParser(
        description='Micro-seismic earthquake detection: "Training" Step')
    parser.add_argument('--config',
                        required=True,
                        metavar='FILE',
                        dest='config_file',
                        help='configuration file to use')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument(
        '--num-workers',
        type=int,
        default=1,
        metavar='W',
        help='how many subprocesses to use for data loading (default: 1)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        dest='use_cuda',
                        action='store_false',
                        default=True,
                        help='disables CUDA training')
    parser.add_argument(
        '--train-split-ratio',
        type=float,
        default=0.8,
        metavar='R',
        help='the portion of the dataset used for training (default: 0.8)')
    parser.add_argument('--random-split',
                        action='store_true',
                        default=False,
                        help='shuffle the dataset before splitting')
    parser.add_argument('--seed',
                        type=int,
                        default=None,
                        metavar='S',
                        help='seed Python and Pytorch RNGs (default: None)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--test-model',
        action='store_true',
        default=False,
        help='test final model and save predictions and ground-truth arrays')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='save the current (trained) model')
    parser.add_argument(
        '--save-costs',
        action='store_true',
        default=False,
        help='write train and test cost values to an ascii column file')
    parser.add_argument(
        '--summary',
        action='store_true',
        default=False,
        help='write summarized info on current model to a plain text file')
    parser.add_argument(
        '--logfile',
        action='store_true',
        default=False,
        help='write logfile for current run to a plain text file')

    args = parser.parse_args()

    # ----------
    tic = datetime.now()
    tic_str = tic.strftime('%Y-%m-%d_%H-%M')

    # CUDA for PyTorch
    use_cuda = args.use_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # Set seed for RNGs
    if args.seed:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)  # for current GPU
        torch.cuda.manual_seed_all(args.seed)  # for all GPUs

    # Improve performance by enabling benchmarking feature
    # see: https://pytorch.org/docs/stable/notes/randomness.html
    cudnn.benchmark = True

    # ----------
    # Read dataset config file
    config = read_config(args.config_file)
    images_dirname = pathlib.Path(config.images_dirname)
    images_paths_pattern = str(
        images_dirname.joinpath(config.images_paths_pattern))
    images_filename_tmpl = str(
        images_dirname.joinpath(config.images_filename_tmpl))

    targets_filename = config.norm_params_filename

    # ----------
    # kwargs for DataLoader
    dataloader_kwargs = {'num_workers': args.num_workers, 'shuffle': True}

    if use_cuda:
        dataloader_kwargs['pin_memory'] = True

    # Dataset partitioning + prepare targets
    partition = get_train_test_partition(images_paths_pattern,
                                         args.train_split_ratio,
                                         args.random_split)

    train_targets, test_targets = \
        get_train_test_targets(targets_filename, partition)

    # Training-set generator
    train_loader = DataLoader(dataset=CustomDataset(images_filename_tmpl,
                                                    partition['train'],
                                                    train_targets),
                              batch_size=args.batch_size,
                              **dataloader_kwargs)

    # Test-set generator
    test_loader = DataLoader(dataset=CustomDataset(images_filename_tmpl,
                                                   partition['test'],
                                                   test_targets),
                             batch_size=args.test_batch_size,
                             **dataloader_kwargs)

    # ----------
    # Create the model object
    model = MultiTaskConvNet().to(device)

    # Adam optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Loss function
    class_criterion = nn.BCEWithLogitsLoss()
    bbox_criterion = nn.MSELoss()

    train_costs, test_costs = [], []
    for epoch in range(1, args.epochs + 1):
        # Training step
        train_cost = train(model, optimizer, class_criterion, bbox_criterion,
                           train_loader, device, epoch, args)

        # Evaluation step
        test_cost = evaluate(model, class_criterion, bbox_criterion,
                             test_loader, device)

        print('\nTrain-set epoch cost : {:.9f}'.format(train_cost))
        print('Test-set average loss: {:.9f}\n'.format(test_cost))

        train_costs.append(train_cost)
        test_costs.append(test_cost)

    # ----------
    fstem_dated = '{}_{}'.format(g_output_fstem, tic_str)

    # Save trained model
    if args.save_model:
        filename_model = fstem_dated + '_model.pth'
        torch.save(model.state_dict(), filename_model)

    # Save cost values
    if args.save_costs:
        filename_costs = fstem_dated + '_cost.nc'
        costs = xr.DataArray(np.vstack((train_costs, test_costs)).T,
                             dims=['epoch_index', 'epoch_cost'],
                             coords={'epoch_cost': ['train', 'test']})
        costs.to_netcdf(filename_costs)

    # Save model predictions
    if args.test_model:
        predictions, groundtruth = test(model, test_loader, device, args)
        predictions.to_netcdf(fstem_dated + '_pred.nc')
        groundtruth.to_netcdf(fstem_dated + '_true.nc')

    # ----------
    toc = datetime.now()

    # Write log-file
    if args.logfile:
        filename_logfile = fstem_dated + '_log.txt'
        with open(filename_logfile, 'w') as fid:
            fid.write('\n'.join(sys.argv[1:]))
            fid.write('\n')

            if args.save_model:
                fid.write('Trained model: {}\n'.format(filename_model))

            fid.write('Started at: {}\n'.format(tic))
            fid.write('Finished at: {}\n'.format(toc))

    # Write model summary
    if args.summary:
        dummy_model = MultiTaskConvNet().to(device)
        filename_summary = fstem_dated + '_summary.txt'
        original_stdout = sys.stdout

        with open(filename_summary, 'w') as fid:
            fid.write(str(dummy_model))
            fid.write('\n' * 3)

            # NOTE: `device` arg should be `str` not `torch.device`
            sys.stdout = fid
            images_batch, labels_batch = next(iter(test_loader))
            input_shape = images_batch.shape[1:]
            summary(dummy_model,
                    input_shape,
                    batch_size=args.batch_size,
                    device=device.type)

            sys.stdout = original_stdout
Example 9
def train(train_samples,
          valid_samples,
          word2num,
          max_len_statement,
          max_len_subject,
          max_len_speaker_pos,
          max_len_context,
          lr=0.001,
          epoch=1,
          use_cuda=False,
          batch_size=20,
          batch_size_val=5,
          model_path='models'):

    print('Training...')

    # Prepare training data
    print('  Preparing training data...')
    statement_word2num = word2num[0]
    subject_word2num = word2num[1]
    speaker_word2num = word2num[2]
    speaker_pos_word2num = word2num[3]
    state_word2num = word2num[4]
    party_word2num = word2num[5]
    context_word2num = word2num[6]

    # train_data = train_samples
    train_data = CustomDataset(train_samples, max_len_statement,
                               max_len_subject, max_len_speaker_pos,
                               max_len_context)
    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              collate_fn=collate_fn)

    # dataset_to_variable(train_data, use_cuda)
    valid_data = valid_samples
    valid_samples = CustomDataset(valid_samples, max_len_statement,
                                  max_len_subject, max_len_speaker_pos,
                                  max_len_context)
    valid_loader = DataLoader(valid_samples,
                              batch_size=batch_size_val,
                              collate_fn=collate_fn)

    # dataset_to_variable(valid_data, use_cuda)

    # Construct model instance
    print('  Constructing network model...')
    model = Net(len(statement_word2num), len(subject_word2num),
                len(speaker_word2num), len(speaker_pos_word2num),
                len(state_word2num), len(party_word2num),
                len(context_word2num))
    if use_cuda:
        print('using cuda')
        model.cuda()

    # Start training
    print('  Start training')

    optimizer = optim.Adam(model.parameters(), lr=lr)
    lr_scheduler = ReduceLROnPlateau(optimizer=optimizer,
                                     mode='max',
                                     factor=0.5,
                                     patience=5)
    model.train()

    step = 0
    display_interval = 50
    optimal_val_acc = 0

    for epoch_ in range(epoch):
        print('  ==> Epoch ' + str(epoch_) + ' started.')
        # random.shuffle(train_data)
        total_loss = 0
        for (inputs_statement, inputs_subject, inputs_speaker,
             inputs_speaker_pos, inputs_state, inputs_party, inputs_context,
             target) in train_loader:

            # sample = [inputs_statement, inputs_subject, inputs_speaker, inputs_speaker_pos, inputs_state, inputs_party, inputs_context]
            optimizer.zero_grad()
            if use_cuda:
                # Tensor.cuda() is not in-place: reassign the returned tensors
                inputs_statement = inputs_statement.cuda()
                inputs_subject = inputs_subject.cuda()
                inputs_speaker = inputs_speaker.cuda()
                inputs_speaker_pos = inputs_speaker_pos.cuda()
                inputs_state = inputs_state.cuda()
                inputs_party = inputs_party.cuda()
                inputs_context = inputs_context.cuda()
                target = target.cuda()

            prediction = model(inputs_statement, inputs_subject,
                               inputs_speaker, inputs_speaker_pos,
                               inputs_state, inputs_party, inputs_context)
            # label = Variable(torch.LongTensor([sample.label]))
            # loss = F.cross_entropy(prediction, label)
            loss = F.cross_entropy(prediction, target)
            loss.backward()
            optimizer.step()

            step += 1
            if step % display_interval == 0:
                print('    ==> Iter: ' + str(step) + ' Loss: ' + str(loss.item()))

            total_loss += loss.item() * len(inputs_statement)

        print('  ==> Epoch ' + str(epoch_) + ' finished. Avg Loss: ' +
              str(total_loss / len(train_data)))

        val_acc = valid(valid_loader, word2num, model, max_len_statement,
                        max_len_subject, max_len_speaker_pos, max_len_context,
                        use_cuda)
        lr_scheduler.step(val_acc)
        for param_group in optimizer.param_groups:
            print("The current learning rate used by the optimizer is : {}".
                  format(param_group['lr']))

        if val_acc > optimal_val_acc:
            optimal_val_acc = val_acc
            model_file = os.path.join(
                model_path,
                'model_bs_{}_lr_{}_acc_{}.pth'.format(batch_size, lr, val_acc))
            old_models = [
                os.path.join(model_path, filename)
                for filename in os.listdir(model_path)
                if filename.startswith("model_bs_{}_lr_{}".format(
                    batch_size, lr))
            ]
            for file_ in old_models:
                os.remove(file_)
            torch.save(model.state_dict(), model_file)

    return optimal_val_acc
Example 10
def main():
    seed_init()

    process = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=0.4422, std=0.1931),
    ])

    att_dataset = CustomDataset(PATH, transform=process)
    dataset = list(att_dataset)
    train = dataset[:30]
    test = dataset[30:]

    resnet18 = ResNet()
    print(f"The No. of Parameters in Model are : {count_parameters(resnet18)}")

    torch.set_grad_enabled(True)
    resnet18.train(True)

    learning_rate = LEARNING_RATE
    optimizer = optim.Adam(resnet18.parameters(), lr=learning_rate)
    torch_triplet_loss = nn.TripletMarginLoss()
    if CUDA:
        resnet18 = resnet18.cuda()

    cost = [float('inf')]
    train_acc = [0]
    test_acc = [0]
    epochs = EPOCHS

    #### TRAINING ####
    for epoch in range(epochs):

        triplets = get_random_triplets(train)
        loader = DataLoader(triplets, batch_size=BATCH_SIZE)
        steps = len(loader)
        print("Lenght of Loader:", steps)
        for i, batch in enumerate(loader):

            loss = Train(resnet18, batch, torch_triplet_loss, optimizer, cost)

            pred = Evaluate(resnet18, train)
            acc1 = ((pred == torch.arange(len(pred)).reshape(-1, 1)).sum() /
                    (len(pred) * 10)).item()
            train_acc.append(acc1)

            pred = Evaluate(resnet18, test)
            acc2 = ((pred == torch.arange(len(pred)).reshape(-1, 1)).sum() /
                    (len(pred) * 10)).item()
            test_acc.append(acc2)

            if (i + 1) % 1 == 0:
                print(
                    f'Epoch:[{epoch+1}/{epochs}], Step:[{i+1}/{steps}]',
                    'Cost : {:.2f}, Train Acc: {:.2f}, Test Acc: {:.2f}'.
                    format(loss, acc1, acc2))
                # print(f'Epoch:[{epoch+1}/{epochs}], Step:[{i+1}/87]', 'Cost : {:.2f}'.format(loss))

    plt.figure(figsize=(12, 10))
    plt.title("Learning Curves")
    plt.xlabel('Total Iterations')
    plt.ylabel('Cost')
    plt.plot(np.arange(len(cost)), cost, label='cost')
    plt.plot(np.arange(len(train_acc)), train_acc, label='train_acc')
    plt.plot(np.arange(len(test_acc)), test_acc, label='test_acc')
    plt.grid(alpha=0.5)
    plt.legend()
    # plt.savefig('/content/drive/MyDrive/Colab Notebooks/siamese-orl-loss on 30classes(resnet)')
    plt.show()

    #### END TRAINING ####

    torch.save(resnet18.state_dict(), SAVE_PATH)

    torch.set_grad_enabled(False)
    resnet18.train(False)

    test_pred = Evaluate(resnet18, test)
    test_acc = (
        (test_pred == torch.arange(len(test_pred)).reshape(-1, 1)).sum() /
        (len(test_pred) * 10)).item()

    train_pred = Evaluate(resnet18, train)
    train_acc = (
        (train_pred == torch.arange(len(train_pred)).reshape(-1, 1)).sum() /
        (len(train_pred) * 10)).item()

    total_pred = Evaluate(resnet18, dataset)
    total_acc = (
        (total_pred == torch.arange(len(total_pred)).reshape(-1, 1)).sum() /
        (len(total_pred) * 10)).item()

    print('Train Acc: {:.2f}\nTest Acc: {:.2f}\nTotal Acc: {:.2f}'.format(
        train_acc, test_acc, total_acc))
Example 11
    args = vars(parser.parse_args())
    logging.info(args)

    # Load the trained model
    model = torch.load(args["model_name"])

    snts, metaphoricity_labels, novelty_labels = [], [], []
    with open("data/met_hippocorpus.csv") as csvfile:
        corpus = csv.reader(csvfile, delimiter=',')
        next(corpus)  # skip the header row (csv.reader has no readline())
        for row in corpus:
            sentence = row[0].split()
            snts.append(sentence)
            metaphoricity_labels.append([0 for _ in sentence])
            novelty_labels.append([0 for _ in sentence])

    dataset = CustomDataset(list(snts), list(metaphoricity_labels),
                            list(novelty_labels))

    sampler = SequentialSampler(dataset)
    hippocorpus = DataLoader(dataset,
                             batch_size=32,
                             sampler=sampler,
                             collate_fn=CustomDataset.collate_fn)

    # Evaluate the trained model on test data.
    evaluate(model,
             hippocorpus,
             output_filename=args["output"],
             no_labels=True)
Example 12
def train(visualize=True):
    torch.manual_seed(config['seed'])
    torch.cuda.manual_seed(config['seed'])
    random.seed(config['seed'])
    
    device = ("cuda" if torch.cuda.is_available() else "cpu")
    generator = Generator().to(device)
    restorer = Restorer().to(device)
    params = list(generator.parameters()) + list(restorer.parameters())
    # joint optimization
    optimizer = Adam(params, lr=config['lr'], weight_decay=config['weight_decay'])

    # Start from previous session
    if config['checkpoint_path'] is not None:
        checkpoint = torch.load(config['checkpoint_path'])
        generator.load_state_dict(checkpoint["generator"])
        restorer.load_state_dict(checkpoint["restorer"])
        optimizer.load_state_dict(checkpoint["optimizer"])
    
    generator.train()
    restorer.train()

    # loss parameters
    alpha = config['loss']['alpha']
    beta = config['loss']['beta']
    gamma = config['loss']['gamma']
    l1_loss = L1Loss().to(device)

    # Dataset
    dataset = CustomDataset()
    dataloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)    

    # Training Loop
    print("Training begins")
    logger = Logger(visualize=visualize) # logs and visdom plots, images
    batch_count, checkpoint_count = 0, 0
    for epoch in range(config['n_epochs']):
        print("Starting Epoch #{}...".format(epoch))
        for batch_i, images in enumerate(dataloader):
            torch.cuda.empty_cache()
            optimizer.zero_grad()
            
            photos = images[0].to(device)
            true_sketches = images[1].to(device)
            deteriorated_sketches = images[2].to(device)

            generated_sketches = generator(photos)
            corrected_sketches = restorer(generated_sketches)
            corrected_deteriorated = restorer(deteriorated_sketches)
            
            loss_base = loss_w1(generated_sketches, true_sketches, gamma) # L_base
            loss_aux = loss_w1(corrected_sketches, true_sketches, gamma) # L_aux
            loss_res = l1_loss(corrected_deteriorated, true_sketches) # L_res
            loss_joint = loss_base + alpha * loss_aux + beta * loss_res # L_joint
            loss_joint.backward()
            optimizer.step()

            # logs batch losses 
            logger.log_iteration(epoch, batch_i, 
                loss_base.item(), 
                loss_res.item(), 
                loss_aux.item(), 
                loss_joint.item()
            )

            if visualize and batch_count % config['sample_interval'] == 0:
                logger.draw(
                    corrected_sketches[0].detach().cpu().numpy() * 255,
                    true_sketches[0].detach().cpu().numpy() * 255,
                    corrected_deteriorated[0].detach().cpu().numpy() * 255,
                    dataset.unnormalize(photos[0]).detach().cpu().numpy()
                )  # draws a sample on Visdom

            # checkpoint save
            if batch_count % config['save_checkpoint_interval'] == 0:
                torch.save({
                    "generator": generator.state_dict(), 
                    "restorer": restorer.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "epoch": epoch
                    }, "{}/checkpoint_{}.pth".format(config['checkpoint_dir'], checkpoint_count))
                checkpoint_count += 1
            batch_count += 1

        if visualize:
            logger.plot_epoch(epoch) # plots the average losses on Visdom
Example 13
    interval_metrics['0.15-0.25'] = [np.mean(mse[np.logical_and(mask <= 0.25, mask > 0.15)]),
                                     np.mean(psnr[np.logical_and(mask <= 0.25, mask > 0.15)])]
    interval_metrics['0.25-0.50'] = [np.mean(mse[np.logical_and(mask <= 0.5, mask > 0.25)]),
                                     np.mean(psnr[np.logical_and(mask <= 0.5, mask > 0.25)])]
    interval_metrics['0.50-1.00'] = [np.mean(mse[mask > 0.5]), np.mean(psnr[mask > 0.5])]
    return interval_metrics

def updateWriterInterval(writer, metrics, epoch):
    for k, v in metrics.items():
        writer.add_scalar('interval/{}-MSE'.format(k), v[0], epoch)
        writer.add_scalar('interval/{}-PSNR'.format(k), v[1], epoch)

if __name__ == '__main__':
    # setup_seed(6)
    opt = TrainOptions().parse()   # get training options
    train_dataset = CustomDataset(opt, is_for_train=True)
    test_dataset = CustomDataset(opt, is_for_train=False)
    train_dataset_size = len(train_dataset)    # get the number of images in the dataset.
    test_dataset_size = len(test_dataset)
    print('The number of training images = %d' % train_dataset_size)
    print('The number of testing images = %d' % test_dataset_size)
    
    train_dataloader = train_dataset.load_data()
    test_dataloader = test_dataset.load_data()
    print('The total batches of training images = %d' % len(train_dataset.dataloader))

    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    total_iters = 0                # the total number of training iterations
    writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name))