Example #1
def main(iexp, itheory, ifeature, output, fmodel):
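    """Score the test set with a saved model and write one 'id:prediction' line per entry."""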
    start = time.time()
    L, idset = readTestData(iexp, itheory, ifeature)
    L_idx = list(range(len(L)))
    test_data = DefineTestDataset(L_idx, L)
    device = torch.device("cuda")
    model = mymodel(CNN(), Net())
    model.cuda()
    model = nn.DataParallel(model)
    model.to(device)
    # map_location keeps tensors on CPU at load time; note that a model wrapped in
    # nn.DataParallel expects checkpoint keys with the 'module.' prefix
    model.load_state_dict(
        torch.load(fmodel, map_location=lambda storage, loc: storage))
    y_pred = test_model(model, test_data, device)
    print('Inference time: {:.1f}s'.format(time.time() - start))

    with open(output, 'w') as fw_test:
        count = 0
        for line in idset:
            if line is False:
                # placeholder entries (False) carry no prediction
                fw_test.write(str(line) + ':' + 'None' + '\n')
            else:
                fw_test.write(line + ':' + str(y_pred[count]) + '\n')
                count += 1
Example #2
def train_model(x_train, x_test, L, Y, weight):
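    """Train for 150 epochs, checkpointing every epoch and keeping the best test-accuracy weights."""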
    LR = 1e-4
    start_time = time.time()
    train_data = DefineDataset(x_train, L, Y, weight)
    test_data = DefineDataset(x_test, L, Y, weight)
    device = torch.device("cuda")
    model = mymodel(CNN(), Net())
    model.cuda()
    model = nn.DataParallel(model)
    model.to(device)
    criterion = my_loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=1e-4)
    best_acc = 0.0
    Train_acc = []
    Test_acc = []
    for epoch in range(0, 150):
        # load the training data in batch
        batch_count = 0
        batch_time = time.time()
        model.train()
        train_loader = Data.DataLoader(train_data, batch_size=6)
        for x_batch, y_batch, feature, weight in train_loader:
            batch_count += 1
            # Variable is deprecated since PyTorch 0.4; move the batch tensors to the device directly
            inputs = x_batch.to(device)
            targets = y_batch.to(device)
            feature = feature.to(device)
            weight = weight.to(device)
            optimizer.zero_grad()
            outputs = model(inputs, feature)  # forward computation
            loss = criterion(outputs, targets, weight)
            # backward propagation and update parameters
            loss.backward()
            optimizer.step()
            # print("batch"+str(batch_count)+" :"+str(get_time_dif(batch_time)))

            # evaluate on both training and test dataset

        train_acc, train_loss, train_Posprec, train_Negprec = evaluate(train_data, model, criterion, device)
        test_acc, test_loss, test_PosPrec, test_Negprec = evaluate(test_data, model, criterion, device)
        if test_acc > best_acc:
            # store the best result
            best_acc = test_acc
            torch.save(model.state_dict(), 'benchmark.pt')
        name = './temp_model/epoch' + str(epoch) + '.pt'
        torch.save(model.state_dict(), name)
        time_dif = get_time_dif(start_time)
        msg = "Epoch {0:3}, Train_loss: {1:>7.2}, Train_acc {2:>6.2%}, Train_Posprec {3:>6.2%}, Train_Negprec {" \
              "4:>6.2%}, " + "Test_loss: {5:>6.2}, Test_acc {6:>6.2%},Test_Posprec {7:6.2%}, Test_Negprec {8:6.2%} " \
                             "Time: {9} "
        print(msg.format(epoch + 1, train_loss, train_acc, train_Posprec, train_Negprec, test_loss, test_acc,
                         test_PosPrec, test_Negprec, time_dif))
        Train_acc.append(train_acc)
        Test_acc.append(test_acc)
    test_model(model, test_data, device)
    return Test_acc, Train_acc
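
The loop above calls a custom my_loss(outputs, targets, weight) that is not defined in this snippet. A minimal sketch, assuming it is a per-sample weighted cross-entropy summed over the batch (the original my_loss may differ):

import torch.nn as nn
import torch.nn.functional as F

class my_loss(nn.Module):
    # hypothetical stand-in; the original my_loss is not shown
    def forward(self, outputs, targets, weight):
        # per-sample cross-entropy, scaled by the sample weight, summed over the batch
        per_sample = F.cross_entropy(outputs, targets, reduction='none')
        return (per_sample * weight).sum()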
Example #3
def main(_):
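    """Build the selected model and run inference on the Set12, BSD68, and Urban100 benchmarks."""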

    tfconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    # allow_growth lets TensorFlow claim GPU memory on demand rather than all at once
    tfconfig.gpu_options.allow_growth = True
    with tf.Session(config=tfconfig) as sess:
        model = mymodel(sess, args)
        if args.model_def == 'RDN':
            model.build_model_inference_2()
            infer = model.inference_2
        else:
            model.build_model_inference()
            infer = model.inference
        # optionally inspect variables:
        # slim.model_analyzer.analyze_vars(tf.trainable_variables(), print_info=True)
        for dataset in ('Set12', 'BSD68', 'Urban100'):
            infer(dataset)
Example #4
def main():
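    """Train a word-level LSTM language model, evaluating and checkpointing after every epoch."""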
    words, vocab = readData(args.data + '/train.txt', args.vsize)
    inverted_vocab = invert_vocab(vocab)
    vocab_size = len(vocab)
    print("Vocabulary size : " + str(vocab_size))
    train_data = batchify(words, args.bsz)
    print(train_data.size())
    with open('vocab.pickle', 'wb') as handle:
        pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)

    if args.model == 'mymodel':
        rnn = mymodel('LSTM', vocab_size, args.nhid, args.nhid, 2,
                      0.2).to(device)
    else:
        rnn = pymodel('LSTM', vocab_size, args.nhid, args.nhid, 2,
                      0.2).to(device)
    learning_rate = args.lr

    running_loss = 0
    num_epochs = 40
    start_time = timeit.default_timer()
    training_loss = []
    dev_loss = []
    dev_perplexity = []
    test_loss = []
    prev_dev_perplexity = float('inf')

    k = 0
    for e in range(num_epochs):
        for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
            data, targets = get_batch_pytorch(train_data, i)
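            # re-initialize the hidden state for every bptt window; no state is carried across batches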
            hidden = rnn.initHidden(args.bsz, device)
            train(rnn, hidden, data, targets)
            k += 1
        elapsed = timeit.default_timer() - start_time
        print('##################')
        print('Epoch %d :' % e)
        print('Time elapsed : %s' % (get_readable_time(int(elapsed))))
        loss, perp = evaluate(rnn, vocab)

        dev_loss.append(loss)
        dev_perplexity.append(perp)
        print('Validation loss : %.1f' % loss)
        print('Validation perplexity : %.1f' % perp)
        generate(rnn, vocab_size, inverted_vocab)
        with open('model.pt', 'wb') as f:
            torch.save(rnn, f)
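
The loop above relies on a get_batch_pytorch helper that is not shown. A minimal sketch in the style of the standard PyTorch word-language-model example, assuming bptt is a module-level constant:

bptt = 35  # assumed window length; the original value is defined elsewhere

def get_batch_pytorch(source, i):
    # slice a bptt-long window of rows and the corresponding next-token targets
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].view(-1)
    return data, target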
Example #5
device = torch.device("cpu")
if use_cuda:
    device = get_default_device()

train_dataset = Data(train=True)
validation_dataset = Data(train=False)
print("done")

# create the train/validation loaders (batch size comes from args.batch_size)
train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size)
validation_loader = DataLoader(dataset=validation_dataset,
                               batch_size=args.batch_size)

# Build the model
model = mymodel("resnet18")  # models.resnet18(pretrained=True)
model = model.to(device)

# transfer learning: freeze the pretrained parameters
for param in model.parameters():
    param.requires_grad = False

# Modify the last layer of the model
n_hidden = 512  # resnet18's final feature dimension
n_out = 2

model.fc = nn.Linear(n_hidden, n_out)
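# note: the new fc layer's parameters have requires_grad=True, so only this head is trained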

# Loss function
criterion = nn.CrossEntropyLoss()
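
The snippet stops after defining the loss. A minimal training step for the new final layer, assuming the loaders defined above (only model.fc has trainable parameters, so the optimizer sees just those):

import torch

# hypothetical hyperparameters; tune for the actual task
optimizer = torch.optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)

model.train()
for x, y in train_loader:
    x, y = x.to(device), y.to(device)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()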
Example #6
        ]))
    test_iter = torch.utils.data.DataLoader(dataset=testdata,
                                            batch_size=16,
                                            num_workers=8)

    sampler = dataloader.RandomSampling(num=16, interval=1)
    traindata = dataloader.VideoIter(
        video_prefix='./raw/data/',
        txt_list='./raw/list_cvt/trainlist01.txt',
        cached_info_path='./raw/cached_train_video_info.txt',
        sampler=sampler,
        return_item_subpath=False,
        clips_num=1,
        name='train',
        video_transform=transforms.Compose([
            transforms.Resize((128, 171)),
            transforms.RandomCrop((112, 112)),
            transforms.ToTensor(),
        ]))
    train_iter = torch.utils.data.DataLoader(dataset=traindata,
                                             batch_size=16,
                                             num_workers=8)
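    # C3D is a 3D-convolutional backbone for video clips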
    net = C3D()
    net.init_weight()
    model = mymodel(net,
                    test_loader=test_iter,
                    train_loader=train_iter,
                    epoch_nums=40,
                    checkpoint_path='./record/',
                    test_clips=1)
    model.run()
Example #7
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]

data_transforms = transforms.Compose(transforms_op)

image_dataset = datasets.ImageFolder(data_dir, transform=data_transforms)
dataloader = torch.utils.data.DataLoader(image_dataset,
                                         batch_size=32,
                                         shuffle=True,
                                         num_workers=0)
inputs, classes = next(iter(dataloader))
classes_num = len(image_dataset.classes)
dataset_size = len(image_dataset)

use_gpu = torch.cuda.is_available()
# print(use_gpu)

# model = mymodel(classes_num)
model = mymodel(120)  # hard-coded class count; classes_num above would generalize
if use_gpu:
    model.cuda()
if restore_net:
    model.load_state_dict(torch.load(os.path.join(weights_path, 'net_23.pth')))

criterion = torch.nn.CrossEntropyLoss()
lr = 0.1
optimizer = optim.SGD(model.parameters(), lr=lr)

def adjust_learning_rate(optimizer, lr, epoch):
    """Set the learning rate to the initial LR decayed by 10 every 10 epochs."""
    lr = lr * (0.1 ** (epoch // 10))
    # apply the new rate; computing lr alone would have no effect on the optimizer
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
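
For reference, a hypothetical call site (the original training loop is not shown):

for epoch in range(30):
    adjust_learning_rate(optimizer, lr, epoch)  # decay before this epoch's updates
    # ... run one training epoch here ...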
Example #8
weights_path = './data/weights'
class_path = './data/pytorch'

classes = os.listdir(class_path)
classes.sort()
use_gpu = torch.cuda.is_available()

transforms_op = [
    transforms.Resize((400, 200), interpolation=3),
    # transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
data_transforms = transforms.Compose(transforms_op)

model = mymodel(len(classes))
model.load_state_dict(torch.load(os.path.join(weights_path, 'net_23.pth')))
if use_gpu:
    model.cuda()
model.eval()  # equivalent to model.train(False)

for name in os.listdir(data_dir):
    img = Image.open(os.path.join(data_dir, name)).convert('RGB')
    img = data_transforms(img).unsqueeze(0)  # add the batch dimension
    if use_gpu:
        img = img.cuda()
    # Variable is deprecated since PyTorch 0.4; no_grad() is the modern inference idiom
    with torch.no_grad():
        output = model(img)


def main():
    # parse args
    global args
    args = Options().args

    # copy all files from experiment
    cwd = os.getcwd()
    for ff in glob.glob("*.py"):
        copy2(os.path.join(cwd, ff), os.path.join(args.folder, 'code'))

    # initialise seeds
    torch.manual_seed(1000)
    torch.cuda.manual_seed(1000)
    np.random.seed(1000)

    # choose cuda
    if args.cuda == 'auto':
        import GPUtil as GPU
        GPUs = GPU.getGPUs()
        idx = [GPUs[j].memoryUsed for j in range(len(GPUs))]
        print(idx)
        assert min(idx) < 11.0, 'All {} GPUs are in use'.format(len(GPUs))
        idx = idx.index(min(idx))
        print('Assigning CUDA_VISIBLE_DEVICES={}'.format(idx))
        os.environ["CUDA_VISIBLE_DEVICES"] = str(idx)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.cuda)

    # parameters
    sigma = float(args.s)
    temperature = float(args.t)
    gradclip = int(args.gc)
    npts = int(args.npts)
    bSize = int(args.bSize)
    angle = float(args.angle)
    flip = str(args.flip).lower() in ('true', '1')  # avoid eval() on a CLI argument
    tight = int(args.tight)

    model = mymodel(sigma=sigma,
                    temperature=temperature,
                    gradclip=gradclip,
                    npts=npts,
                    option=args.option,
                    size=args.size,
                    path_to_check=args.checkpoint)

    plotkeys = ['input', 'target', 'generated']
    losskeys = list(model.loss.keys())

    # define plotters
    global plotter
    if not args.visdom:
        print('No Visdom')
        plotter = None
    else:
        from torchnet.logger import VisdomPlotLogger, VisdomLogger, VisdomSaver, VisdomTextLogger
        experimentsName = str(args.visdom)
        plotter = dict.fromkeys(['images', 'losses'])
        plotter['images'] = dict([(key,
                                   VisdomLogger("images",
                                                port=int(args.port),
                                                env=experimentsName,
                                                opts={'title': key}))
                                  for key in plotkeys])
        plotter['losses'] = dict([(key,
                                   VisdomPlotLogger("line",
                                                    port=int(args.port),
                                                    env=experimentsName,
                                                    opts={
                                                        'title': key,
                                                        'xlabel': 'Iteration',
                                                        'ylabel': 'Loss'
                                                    })) for key in losskeys])

    # prepare average meters
    global meters, l_iteration
    meterskey = ['batch_time', 'data_time']
    meters = dict([(key, AverageMeter()) for key in meterskey])
    meters['losses'] = dict([(key, AverageMeter()) for key in losskeys])
    l_iteration = float(0.0)

    # print number of trainable parameters
    params = sum(p.numel() for p in model.GEN.parameters() if p.requires_grad)
    print('GEN # trainable parameters: {}'.format(params))
    params = sum(p.numel() for p in model.FAN.parameters() if p.requires_grad)
    print('FAN # trainable parameters: {}'.format(params))

    # define data
    video_dataset = SuperDB(path=args.data_path,
                            sigma=sigma,
                            size=args.size,
                            flip=flip,
                            angle=angle,
                            tight=tight,
                            db=args.db)
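    # pin_memory=True speeds up host-to-GPU copies during training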
    videoloader = DataLoader(video_dataset,
                             batch_size=bSize,
                             shuffle=True,
                             num_workers=int(args.num_workers),
                             pin_memory=True)
    print('Number of workers is {:d}, and bSize is {:d}'.format(
        int(args.num_workers), bSize))

    # define optimizers
    lr_fan = args.lr_fan
    lr_gan = args.lr_gan
    print('Using learning rate {} for FAN, and {} for GAN'.format(
        lr_fan, lr_gan))
    optimizerFAN = torch.optim.Adam(model.FAN.parameters(),
                                    lr=lr_fan,
                                    betas=(0, 0.9),
                                    weight_decay=5 * 1e-4)
    schedulerFAN = torch.optim.lr_scheduler.StepLR(optimizerFAN,
                                                   step_size=args.step_size,
                                                   gamma=args.gamma)
    optimizerGEN = torch.optim.Adam(model.GEN.parameters(),
                                    lr=lr_gan,
                                    betas=(0, 0.9),
                                    weight_decay=5 * 1e-4)
    schedulerGEN = torch.optim.lr_scheduler.StepLR(optimizerGEN,
                                                   step_size=args.step_size,
                                                   gamma=args.gamma)
    myoptimizers = {'FAN': optimizerFAN, 'GEN': optimizerGEN}

    # path to save models and images
    path_to_model = os.path.join(args.folder, args.file)

    # train
    for epoch in range(0, 80):
        train_epoch(videoloader, model, myoptimizers, epoch, bSize)
        # step the schedulers after the epoch's optimizer updates (required order since PyTorch 1.1)
        schedulerFAN.step()
        schedulerGEN.step()
        model._save(path_to_model, epoch)