Code Example #1
File: train.py Project: LaplandSnowy/aspect
def train():
    batch_size = 64
    # maxlen = 36
    data_loader = Data_Loader(batch_size)
    maxlen = data_loader.maxlen
    model = Model(data_loader.emb_mat, num_class=3, drop_out=0.5).to(device)
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=0.0001)  # learning rate is important
    epochs = 100
    for i in range(epochs):
        data_loader.reset_pointer()
        num_batch = int(data_loader.train_size / batch_size)
        for b in range(num_batch + 1):
            input_data, mask_data, y_data = data_loader.__next__()

            loss = model(input_data.to(device), maxlen, mask_data.to(device),
                         y_data.to(device))

            optimizer.zero_grad()

            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)

            optimizer.step()

            sys.stdout.write("\repoch:{}, batch:{}, loss:{}".format(
                i, b, loss.item()))
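A note on the interface: the Data_Loader class itself is not shown in any of these excerpts. The sketch below is a hypothetical, minimal version of what the training loop above assumes; the attribute and method names (emb_mat, maxlen, train_size, reset_pointer, __next__) come from the calls above, while the shapes and contents are placeholders only.

import numpy as np
import torch

class Data_Loader:
    """Hypothetical minimal interface assumed by the loop in Code Example #1."""

    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.maxlen = 36                # padded sequence length
        self.emb_mat = np.zeros((10000, 300), dtype=np.float32)  # placeholder embedding matrix
        self.train_size = 0             # number of training samples
        self._pointer = 0               # index of the current batch

    def reset_pointer(self):
        # rewind to the first batch at the start of every epoch
        self._pointer = 0

    def __next__(self):
        # return one batch of (token ids, mask, labels); placeholder tensors here
        self._pointer += 1
        input_data = torch.zeros(self.batch_size, self.maxlen, dtype=torch.long)
        mask_data = torch.ones(self.batch_size, self.maxlen)
        y_data = torch.zeros(self.batch_size, dtype=torch.long)
        return input_data, mask_data, y_data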
Code Example #2
def test():
    config = Config(max_feature=200000, maxlen=400, embedding_dims=300,
                    embedding_file='/home/hegx/Research/Quora_Question_Pairs/data/glove.840B.300d.txt')
    data_loader = Data_Loader()
    pre_process = Pre_Process()
    X, y, word_index = pre_process.process_from_file(config, data_loader)
    config.get_embedding_matrix(word_index)
Code Example #3
def main(config):
    cudnn.benchmark = True

    print("Loading data...")
    data_loader = Data_Loader(config.train,
                              config.dataset,
                              config.imsize,
                              config.batch_size,
                              config.image_path,
                              shuf=config.train)
    print('Done.')

    # Create directories if these do not exist
    for _subdir in ['gen', 'gen_avg', 'gen_ema', 'gen_ema_slow']:
        make_folder(config.model_save_path, _subdir)
        make_folder(config.sample_path, _subdir)
    make_folder(config.log_path)
    make_folder(config.best_path)
    if config.backup_freq > 0:
        make_folder(config.bup_path)
    if config.dataset == 'imagenet' and config.fid_freq > 0:
        make_folder(config.metrics_path)

    # Train
    trainer = Trainer(data_loader.loader(), config)
    trainer.train()
Code Example #4
def main(config):
    # For fast training
    cudnn.benchmark = True

    # Data loader
    data_loader = Data_Loader(config.train,
                              config.dataset,
                              config.image_path,
                              config.imsize,
                              config.batch_size,
                              shuf=config.train)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)
    make_folder(config.log_path, config.version)
    make_folder(config.attn_path, config.version)

    if config.train:
        if config.model == 'sagan':
            trainer = Trainer(data_loader.loader(), config)
        elif config.model == 'qgan':
            trainer = qgan_trainer(data_loader.loader(), config)
        trainer.train()
    else:
        tester = Tester(data_loader.loader(), config)
        tester.test()
Code Example #5
def main(config):
    # For fast training
    cudnn.benchmark = True

    config.n_class = len(glob.glob(os.path.join(config.image_path, '*/')))
    print('number of classes:', config.n_class)
    # Data loader
    data_loader = Data_Loader(config.train,
                              config.dataset,
                              config.image_path,
                              config.imsize,
                              config.batch_size,
                              shuf=config.train)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)
    make_folder(config.log_path, config.version)
    make_folder(config.attn_path, config.version)

    print('configured data_loader and built logs folder')

    # TODO: why not biggan? -Ans: an improvement based on a certain network
    if config.train:
        if config.model == 'sagan':
            trainer = Trainer(data_loader.loader(), config)
        elif config.model == 'qgan':
            trainer = qgan_trainer(data_loader.loader(), config)
        trainer.train()
    else:
        tester = Tester(data_loader.loader(), config)
        tester.test()
Code Example #6
 def _find_is(self):
     data_loader = Data_Loader('images',
                               self.eval_dir,
                               self.dataloader.imsize,
                               self.batch_size,
                               shuffle=False)
     _is = inception_score(data_loader.loader(), self.device, True, 10)
     self.summary['inception_score'].append((self.iter, _is))
     self.summary['inception_done'].append(self.iter)
Code Example #7
File: train_ad.py Project: zkzt/GOAD
def load_trans_data(args, trans):
    dl = Data_Loader()
    x_train, x_test, y_test = dl.get_dataset(args.dataset,
                                             true_label=args.class_ind)
    x_train_trans, labels = transform_data(x_train, trans)
    x_test_trans, _ = transform_data(x_test, trans)
    # NHWC -> NCHW for the convolutional network input
    x_test_trans, x_train_trans = x_test_trans.transpose(
        0, 3, 1, 2), x_train_trans.transpose(0, 3, 1, 2)
    # boolean test labels: True where the original label equals args.class_ind
    y_test = np.array(y_test) == args.class_ind
    return x_train_trans, x_test_trans, y_test
Code Example #8
File: predict.py Project: FrankBlood/Deep-Regression
def predict(bst_model_path):
    data_loader = Data_Loader()
    val_data, val_label = data_loader.load(data_loader.validation_path)

    model = first_model()
    # model = conv_model()
    # model = tmp()

    model.load_weights(bst_model_path)
    y = model.predict(val_data)
    return y
Code Example #9
def main(config):
    dataset = config.dataset
    path = config.data_path
    img_size = config.img_size
    out_dir = os.path.join(config.index_path, dataset)
    batch_size = config.batch_size
    if dataset == 'lsun':
        dataloader = Data_Loader(
            dataset,
            path,
            img_size,
            batch_size,
            classes=['bedroom_train', 'bridge_train', 'church_outdoor_train'],
            shuffle=False)
    else:
        dataloader = Data_Loader(dataset,
                                 path,
                                 img_size,
                                 batch_size,
                                 shuffle=False)

    class_dict = {}

    device = torch.device(config.device)
    model = InceptionV3().to(device)

    data_iter = dataloader.loader()

    index = get_index()

    for batch_idx, (images, labels) in enumerate(data_iter):
        batch = images.to(device)
        preds = model(batch)[0]
        preds = preds.cpu().numpy().reshape((preds.shape[0], preds.shape[1]))
        index.addDataPointBatch(
            preds, range(batch_idx * batch_size, (batch_idx + 1) * batch_size))
        class_dict = save_images(images, labels, batch_idx, batch_size,
                                 class_dict, out_dir)

    create_index(index, os.path.join(out_dir, 'index.bin'))
    with open(os.path.join(out_dir, 'class_dict.json'), 'w') as fp:
        json.dump(class_dict, fp)
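The helpers get_index() and create_index() are not part of this excerpt. Since index.addDataPointBatch(preds, ids) matches the nmslib API, one plausible sketch of those helpers is given below; it is an assumption, not the project's actual code.

import nmslib

def get_index():
    # assumed: an empty HNSW index over InceptionV3 features with cosine similarity
    return nmslib.init(method='hnsw', space='cosinesimil')

def create_index(index, path):
    # assumed: build the graph and write it to disk
    index.createIndex({'M': 16, 'efConstruction': 200}, print_progress=True)
    index.saveIndex(path)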
Code Example #10
 def _find_fid(self):
     data_loader = Data_Loader('images',
                               self.eval_dir,
                               self.dataloader.imsize,
                               self.batch_size,
                               shuffle=False)
     _fid = fid_score(self.dataloader.loader(),
                      self.dataset,
                      data_loader.loader(),
                      device=self.device)
     self.summary['fid'].append((self.iter, _fid))
     self.summary['fid_done'].append(self.iter)
Code Example #11
def load_trans_data(args, trans):
    dl = Data_Loader()
    x_train, x_test, y_test = dl.get_dataset(args.dataset,
                                             true_label=args.class_ind,
                                             flip_ones_and_zeros=args.flip)
    print("Computing transformed data for train data")
    x_train_trans, labels = transform_data(x_train, trans)
    print("Computing transformed data for test data")
    x_test_trans, _ = transform_data(x_test, trans)
    x_test_trans, x_train_trans = x_test_trans.transpose(0, 3, 1, 2), x_train_trans.transpose(0, 3, 1, 2)
    y_test = np.array(y_test) == args.class_ind
    return x_train_trans, x_test_trans, y_test
Code Example #12
def train(config):
    data_loader = Data_Loader()
    pre_process = Pre_Process()
    # X_train, y_train, X_test, y_test = pre_process.process(config, data_loader)
    X_train, y_train, X_test, y_test, word_index = pre_process.process_from_file(
        config, data_loader)
    embedding_matrix = config.get_embedding_matrix(word_index)

    print('Train...')

    if config.model_name == 'bidirectional_lstm':
        model = bidirectional_lstm(config)

    elif config.model_name == 'cnn':
        model = cnn(config)

    elif config.model_name == 'cnn_lstm':
        model = cnn_lstm(config)

    elif config.model_name == 'lstm':
        model = lstm(config)

    elif config.model_name == 'cnn_based_rnn':
        model = cnn_based_rnn(config, embedding_matrix)

    else:
        print("What the F**K!")
        return

    early_stopping = EarlyStopping(monitor='val_acc', patience=3)
    now_time = '_'.join(time.asctime(time.localtime(time.time())).split(' '))
    bst_model_path = './models/' + config.model_name + '_' + now_time + '.h5'
    print('bst_model_path:', bst_model_path)
    model_checkpoint = ModelCheckpoint(bst_model_path,
                                       monitor='val_acc',
                                       save_best_only=True,
                                       save_weights_only=True)

    model.fit(X_train,
              y_train,
              batch_size=config.batch_size,
              nb_epoch=config.nb_epoch,
              shuffle=True,
              validation_data=[X_test, y_test],
              callbacks=[early_stopping, model_checkpoint])

    if os.path.exists(bst_model_path):
        model.load_weights(bst_model_path)

    print('test:', model.evaluate(X_test, y_test,
                                  batch_size=config.batch_size))
Code Example #13
def main():
    # print("something")
    ml_algorithm = sys.argv[1]  #cnn or svm
    ml_step = sys.argv[2]  #train or test
    data_format = sys.argv[3]  #image or file
    data_path = './data'
    test_size_ratio = 0.1
    loader = Data_Loader(data_path)
    # unshuffled split of data to train and test
    class_data_count = 1500
    [train_img, train_labels, test_img, test_labels] = [
        np.array(x) for x in loader.load_all_data(test_size_ratio, data_format,
                                                  class_data_count)
    ]

    if ml_algorithm == "svm":
        svm_classifier = SVM(train_img, train_labels, test_img, test_labels)
        svm_classifier.plots()
    elif ml_algorithm == "cnn":
        print("starting CNN!")
        b_size = int(sys.argv[4])
        num_epochs = int(sys.argv[5])
        cnn_classifier = CNN1(train_img, train_labels, test_img, test_labels,
                              b_size, num_epochs)
        if ml_step == "test":
            accuracy = cnn_classifier.test()
        elif ml_step == "train":
            accuracy = cnn_classifier.train()
        elif ml_step == "predict":
            file_path = input('hi')
            img = np.asarray(
                Image.open(file_path).convert('L').resize(
                    (45, 45), Image.LANCZOS)).flatten()
            features = []
            features.append(img / 255.0)
            test_img = np.array(features)
            # print(test_img.shape)
            img_rows, img_columns = 45, 45
            test_data = test_img.reshape(
                (test_img.shape[0], img_rows, img_columns))
            test_data = test_data[:, np.newaxis, :, :]
            # print(test_data.shape)
            prediction = cnn_classifier.predict(test_data[np.newaxis, 0])
            count = 0
            feature_map = {}
            for folder in os.listdir("./data"):
                # print(folder+":"+str(count))
                feature_map[count] = folder
                count += 1
            print(feature_map[prediction[0]])
        return
Code Example #14
File: main.py Project: plodha/CMPE-297-DeepLearning
def main(config):
    # For fast training
    cudnn.benchmark = True

    # Data loader
    data_loader = Data_Loader(config.train,
                              config.dataset,
                              config.mura_class,
                              config.mura_type,
                              config.image_path,
                              config.imsize,
                              config.batch_size,
                              shuffle=config.train)
    """
    train = MURASubset(filenames=splitter.data_train.path, patients=splitter.data_train.patient,
                       transform=composed_transforms, true_labels=np.zeros(len(splitter.data_train.path)))
    validation = MURASubset(filenames=splitter.data_val.path, true_labels=splitter.data_val.label,
                            patients=splitter.data_val.patient, transform=composed_transforms_val)
    test = MURASubset(filenames=splitter.data_test.path, true_labels=splitter.data_test.label,
                      patients=splitter.data_test.patient, transform=composed_transforms_val)

    train_loader = DataLoader(train, batch_size=run_params['batch_size'], shuffle=True, num_workers=num_workers,
                              worker_init_fn=loader_init_fn, drop_last=model_class in [DCGAN])
    val_loader = DataLoader(validation, batch_size=run_params['batch_size'], shuffle=True, num_workers=num_workers,
                            drop_last=model_class in [DCGAN])
    test_loader = DataLoader(test, batch_size=run_params['batch_size'], shuffle=True, num_workers=num_workers,
                             drop_last=model_class in [DCGAN])
    """

    # for batch_data in tqdm(data_loader, desc='Training', total=len(data_loader)):
    #     print('batch_data ',batch_data)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)
    make_folder(config.log_path, config.version)
    make_folder(config.attn_path, config.version)

    if config.train:
        if config.model == 'sagan':
            trainer = Trainer(data_loader.loader(), config)
        elif config.model == 'qgan':
            trainer = qgan_trainer(data_loader.loader(), config)
        elif config.model == 'alpha':
            trainer = alpha_Trainer(data_loader.loader(), config)
        print('calling the trainer')
        trainer.train()
    else:
        tester = Tester(data_loader.loader(), config)
        tester.test()
Code Example #15
def load_trans_data(args):
    dl = Data_Loader()
    train_real, val_real, val_fake = dl.get_dataset(args.dataset, args.c_pr)
    y_test_fscore = np.concatenate([np.zeros(len(val_real)), np.ones(len(val_fake))])
    ratio = 100.0 * len(val_real) / (len(val_real) + len(val_fake))

    n_train, n_dims = train_real.shape
    rots = np.random.randn(args.n_rots, n_dims, args.d_out)

    print('Calculating transforms')
    x_train = np.stack([train_real.dot(rot) for rot in rots], 2)
    val_real_xs = np.stack([val_real.dot(rot) for rot in rots], 2)
    val_fake_xs = np.stack([val_fake.dot(rot) for rot in rots], 2)
    x_test = np.concatenate([val_real_xs, val_fake_xs])
    return x_train, x_test, y_test_fscore, ratio
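Each train_real.dot(rot) projects the (n_samples, n_dims) matrix down to (n_samples, d_out), and np.stack(..., 2) collects the n_rots projections along a new last axis, so x_train has shape (n_samples, d_out, n_rots). A tiny self-contained shape check (sizes are arbitrary):

import numpy as np

n, n_dims, d_out, n_rots = 8, 16, 4, 5
train_real = np.random.randn(n, n_dims)
rots = np.random.randn(n_rots, n_dims, d_out)

x_train = np.stack([train_real.dot(rot) for rot in rots], 2)
print(x_train.shape)  # (8, 4, 5), i.e. (n_samples, d_out, n_rots)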
Code Example #16
        def train(self):
                prev_accuracy=0
                if self.N==2:
                        l=[self.marathi_data_loader,self.hindi_data_loader,self.magahi_data_loader,self.sanskrit_data_loader,
                          self.english_data_loader,self.german_data_loader,self.danish_data_loader]
                else:
                        l=[self.hindi_data_loader,self.magahi_data_loader,self.sanskrit_data_loader,
                          self.english_data_loader,self.german_data_loader]
                add=1
                for epoch in range(self.epochs):
                        
                        fast_weights=OrderedDict((name,param) for (name,param) in self.encoder.named_parameters())
                        ls=[]
                        random.shuffle(l)
                        
                        if self.N==4:
                                data_loader1=Data_Loader(l[:self.N],self.N,self.K)
                                data_loader3=Data_Loader(l[1:],self.N,self.K)
                        else:
                                data_loader1=Data_Loader(l[:self.N],self.N,self.K)
                                data_loader2=Data_Loader(l[self.N:self.N*2],self.N,self.K,examples=2)
                                data_loader3=Data_Loader(l[2*self.N:self.N*3],self.N,self.K)
                                data_loader4=Data_Loader([l[0],l[self.N*3]],self.N,self.K,examples=2)

                        grads1,loss1=self.fast_net.train(fast_weights,data_loader1,self.N,self.K)
                        if self.N==2:
                                grads1_prime,loss1_prime=self.fast_net.train(fast_weights,data_loader2,self.N,self.K)
                                ls.append(grads1_prime)
                        ls.append(grads1)
                        grads={k: sum(d[k] for d in ls) for k in ls[0].keys()}
                        self.meta_update1(grads,data_loader1)

                        fast_weights=OrderedDict((name,param) for (name,param) in self.encoder.named_parameters())
                        ls=[]

                        grads2,loss2=self.fast_net.train(fast_weights,data_loader3,self.N,self.K)
                        if self.N==2:
                                grads2_prime,loss2_prime=self.fast_net.train(fast_weights,data_loader4,self.N,self.K)
                                ls.append(grads2_prime)      
                        ls.append(grads2)            
                        grads={k: sum(d[k] for d in ls) for k in ls[0].keys()}
                        self.meta_update1(grads,data_loader3)
                        
                        if self.N==4:
                                loss1_prime=torch.tensor([0])
                                loss2_prime=torch.tensor([0])
                        print('epoch=',epoch+add,'training loss=',(loss1.item()+loss1_prime.item())/(self.N*self.K*self.mb))
                        print('epoch=',epoch+add+1,'training loss=',(loss2.item()+loss2_prime.item())/(self.N*self.K*self.mb))

                        add+=1
                        if (epoch+1)%5==0:
                                a,b=self.test()
                                if b>prev_accuracy:
                                        torch.save(self.encoder.state_dict(),'model_MAML_'+str(self.N)+'_way_'+str(self.K)+'_shot'+'.pth')
                                        prev_accuracy=b
Code Example #17
        def test(self,t=2,num=40):
                accuracy_final1=0
                accuracy_final2=0

                for _ in range(num):
                        fast_weights=OrderedDict((name,param) for (name,param) in self.encoder.named_parameters())
                        self.fast_net.clone_weights_for_test(fast_weights)
                        train_optimizer=optim.Adam(self.fast_net.parameters(),lr=self.lr)
                        
                        if self.N==2:
                                loaders=[self.bhojpuri_data_loader,self.dutch_data_loader]
                        else:
                                loaders=[self.bhojpuri_data_loader,self.dutch_data_loader,self.danish_data_loader,self.marathi_data_loader]
                        random.shuffle(loaders)
                        data_loader=Data_Loader(loaders,self.N,K=self.K,examples=2)

                        for _ in range(self.inner_epoch):
                                train_optimizer.zero_grad()
                                loss=0
                                for _ in range(self.N*self.K):
                                        x_test,y_test,sentence=data_loader.load_next(reuse=True)
                                        loss+=self.fast_net.test_train(sentence,x_test,y_test)
                                loss.backward()
                                train_optimizer.step()
                        
                        data_loader.set_counter()
                        a,b,c=0,0,0
                        for _ in range(t):
                                x_test,y_test,sentence1=data_loader.load_next()
                                score,outputprime=self.fast_net.forward(x_test,sentence1)
                                
                                j,count=0,0
                                for i in range(len(sentence1)):
                                        c+=1
                                        if outputprime[j]==y_test[j]:
                                                count+=1
                                                b+=1
                                        j+=1
                                
                                accuracy=100*count/j
                                a+=accuracy
                                
                        accuracy_final1+=a/t
                        accuracy_final2+=b*100/c
                print('validation accuracy over sentences=',accuracy_final1/num,'validation accuracy over tags=',accuracy_final2/num)

                return accuracy_final1/num,accuracy_final2/num
Code Example #18
File: main.py Project: rv-gonela/MaskGAN
def main(config):
    # For fast training
    cudnn.benchmark = True

    if config.train:
        # Create directories if not exist
        make_folder(config.model_save_path, config.version)
        make_folder(config.sample_path, config.version)
        make_folder(config.log_path, config.version)

        data_loader = Data_Loader(config.img_path, config.label_path, config.imsize, config.batch_size, config.train)
        trainer = Trainer(data_loader.loader(), config)
        trainer.train()
    else:
        tester = Tester(config)
        tester.test()
Code Example #19
def train():
    data_loader = Data_Loader()
    data, label = data_loader.new_load(data_loader.train_path)
    val_data, val_label = data_loader.new_load(data_loader.validation_path)
    test_data, test_label = data_loader.new_load(data_loader.test_path)

    # model = first_model()
    # model = q_model()
    model = resnet_model()
    #model = h_resnet_model()
    # model = deeper_model()
    #model = baseline_model()
    # model = tmp()

    early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    now_time = '_'.join(time.asctime(time.localtime(time.time())).split(' '))
    bst_model_path = '../models/' + 'qstd_resnet_model' + '_' + now_time + '.h5'
    print('bst_model_path:', bst_model_path)
    model_checkpoint = ModelCheckpoint(bst_model_path,
                                       monitor='val_loss',
                                       save_best_only=True,
                                       save_weights_only=True)

    tb_cb = TensorBoard(log_dir='./q_resnet_tensorboard/',
                        histogram_freq=1,
                        write_graph=True,
                        write_images=False,
                        embeddings_freq=0,
                        embeddings_layer_names=None,
                        embeddings_metadata=None)

    model.fit(data,
              label,
              batch_size=32,
              epochs=30,
              shuffle=True,
              validation_data=[test_data, test_label],
              callbacks=[model_checkpoint, tb_cb])
    # callbacks=[early_stopping, model_checkpoint, tb_cb])

    if os.path.exists(bst_model_path):
        model.load_weights(bst_model_path)

    test_loss, test_acc = model.evaluate(val_data, val_label)
    print(test_loss, test_acc)
Code Example #20
File: main.py Project: houze-liu/AttnGAN
def main(config):
    # if the input size remains unchanged,
    # enabling this leads to faster runtime
    cudnn.benchmark = True

    # Data loader
    data_loader = Data_Loader(config.dataset,
                              config.image_path,
                              config.imsize,
                              config.batch_size,
                              shuf=config.train)

    # Create directories if not exist
    make_folder(config.model_save_path, config.version)
    make_folder(config.sample_path, config.version)

    trainer = Trainer(data_loader.loader(), config)
    trainer.train()
Code Example #21
 def test(self):
     config = Config(
         max_feature=200000,
         maxlen=400,
         embedding_dims=300,
         embedding_file=
         '/home/irlab0/Research/kaggle/Quora_Question_Pairs/data/glove.840B.300d.txt',
         trian_path=
         '/home/irlab0/Research/TextClassification/imdb/data/aclImdb/train/',
         test_path=
         '/home/irlab0/Research/TextClassification/imdb/data/aclImdb/test/')
     data_loader = Data_Loader()
     # X_train, y_train, X_test, y_test = self.process(config, data_loader)
     X_train, y_train, X_test, y_test, word_index = self.process_from_file(
         config, data_loader)
     config.get_embedding_matrix(word_index)
     print(X_train[0])
     print(y_train[0])
Code Example #22
def train():
	batch_size = 32
	neg_size = 4
	data_loader = Data_Loader(batch_size)
	maxlen = data_loader.maxlen
	model = Model(data_loader.emb_mat,
				num_tag = data_loader.num_tag,
				num_class = 3,
				maxlen = maxlen,
				batch_size = batch_size,
				drop_out = 0.5,
				neg_size = neg_size)
	epochs = 100
	best_acc = 0
	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())
		saver = tf.train.Saver(tf.global_variables())

		for i in range(epochs):
			data_loader.reset_pointer()
			num_batch = int(data_loader.train_size/batch_size)
			for b in range(num_batch+1):
				input_data, input_tag, mask_data, y_data, label_mask = data_loader.__next__()
				# print(input_data.shape, mask_data.shape, y_data.shape)
				input_neg = np.random.randint(1,data_loader.vocab_size, (input_data.shape[0], maxlen, neg_size))
				# print(input_neg)
				y_data = to_categorical(y_data, 3)
				# print(y_data.shape,'uqjwen')
				_,loss = sess.run([model.train_op, model.cost], feed_dict = {model.x:input_data,
																			model.t:input_tag,
																			model.mask:mask_data,
																			model.neg:input_neg,
																			model.labels:y_data})

				sys.stdout.write('\repoch:{}, batch:{}, loss:{}'.format(i,b,loss))
				sys.stdout.flush()

				# break
			acc1, acc2 = val(sess, model, data_loader)
			if acc1>best_acc:
				best_acc = acc1
				saver.save(sess, checkpointer_dir+'model.ckpt', global_step=i)
			print("\nacc1: ",acc1, "acc2: ",acc2)
Code Example #23
def main(config):
    cudnn.benchmark = True

    data_loader = Data_Loader(config.train, config.dataset, config.data_dir, config.imsize,
                             config.batch_size, shuf=config.train)

    for _subdir in ['gen', 'gen_avg', 'gen_ema']:
        make_folder(config.model_save_path, _subdir)
        make_folder(config.sample_path, _subdir)

    make_folder(config.log_path)
    if config.backup_freq > 0:
        make_folder(config.bup_path)

    if config.model == 'sagan':
        trainer = Trainer(data_loader.loader(), config)
    elif config.model == 'qgan':
        trainer = qgan_trainer(data_loader.loader(), config)
    trainer.train()
Code Example #24
File: train_ad.py Project: SachinG007/GOAD_aug
def load_trans_data(args, trans):
    dl = Data_Loader()
    x_train, x_test, y_test = dl.get_dataset(args.dataset,
                                             true_label=args.class_ind)
    print("Non Augmented Data Shape: ", x_train.shape)
    #DATA AUGMENTATION
    normal_data_transformer = Transformer_non90(0, 0, 30)
    transformations_inds_aug = np.tile(
        np.arange(normal_data_transformer.n_transforms), len(x_train))
    print("Num Data Augments: ", normal_data_transformer.n_transforms)
    x_train_aug = normal_data_transformer.transform_batch(
        np.repeat(x_train, normal_data_transformer.n_transforms, axis=0),
        transformations_inds_aug)
    print("Augmented Data Shape ", x_train_aug.shape)

    x_train_trans, labels = transform_data(x_train_aug, trans)
    x_test_trans, _ = transform_data(x_test, trans)
    x_test_trans, x_train_trans = x_test_trans.transpose(
        0, 3, 1, 2), x_train_trans.transpose(0, 3, 1, 2)
    y_test = np.array(y_test) == args.class_ind
    return x_train_trans, x_test_trans, y_test
Code Example #25
    def __init__(self,
                 base_directory,
                 conv1_layers = 512,
                 fc1_layers = 1024,
                 num_classes = 1001,
                 sen2vec_hidden = 4900,
                 learning_rate = 0.001,
                 n_epochs = 10,
                 batch_size = 32,
                 num_ans = 10,
                 max_questions_words=23,
                 ):

        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.conv1_layers = conv1_layers
        self.fc1_layers = fc1_layers
        self.num_classes = num_classes
        self.sen2vec_hidden = sen2vec_hidden
        self.num_ans = num_ans
        self.learning_rate = learning_rate
        self.max_question_words = max_questions_words

        self.img2vec = CNN_Loader(base_directory)
        self.pool_length = self.img2vec.pool_length
        self.pool_depth = self.img2vec.pool_depth
        self.F_input_dim = sen2vec_hidden // (self.pool_length * self.pool_length) + self.pool_depth

        self.word2vec = Word2Vec_Loader(base_directory)
        self.word_vec_size = self.word2vec.embedding_size

        vec_shape = [self.pool_length, self.pool_length, sen2vec_hidden // (self.pool_length * self.pool_length)]
        self.sen2vec = Sentence2Vec_Loader(num_hidden= sen2vec_hidden, vec_shape = vec_shape)

        self.data_loader = Data_Loader(base_directory)
        self.num_train_questions = self.data_loader.num_train_questions
        self.num_test_questions = self.data_loader.num_test_questions
        self.eps = 1e-10
Code Example #26
def main(config):
    img_size = config.img_size
    batch_size = config.batch_size
    eval_folder = config.eval_folder
    device = config.device
    classes = None
    if config.classes is not None:
        classes = config.classes.split(',')
    elif config.dataset == 'lsun':
        raise Exception('Provide class list. Available options: bedroom_train,bridge_train,church_outdoor_train')
    
    model_type = eval_folder.split('_')[1]
    data_loader = Data_Loader(config.dataset, config.data_path, img_size, batch_size, classes=classes, shuffle=False)
    if model_type == 'dcgan':
        device = torch.device(device)
        sn = 'sn' in eval_folder
        _, generator= get_dcgan(img_size, 3, sn=sn,device=device)
    else:
        gpus = [int(device.split(':')[-1])]
        device = torch.device(device)
        _, generator = get_biggan(data_loader.num_classes, gpus=gpus)
    
    evaluator = Evaluator(generator, data_loader, eval_folder, device, batch_size=batch_size)
    evaluator.run()
Code Example #27
    prefix = '/home/wenjh/aHIGAN/Grocery_and_Gourmet_Food/'
    filename = prefix + 'Grocery_and_Gourmet_Food_5.json'

    flags = tf.flags.FLAGS
    tf.flags.DEFINE_string('filename', filename, 'name of file')
    tf.flags.DEFINE_integer('batch_size', 128, 'batch size')
    tf.flags.DEFINE_integer('emb_size', 100, 'embedding size')
    tf.flags.DEFINE_integer('num_class', 5, "num of classes")
    tf.flags.DEFINE_integer('epoch', 10, 'epochs for training')
    tf.flags.DEFINE_string('ckpt_dir',
                           filename.split('.')[0], 'directory of checkpoint')
    tf.flags.DEFINE_string('train_test', 'train', 'training or test')
    # tf.flags.DEFINE_string('base_model', 'att_cnn', 'base model')
    flags(sys.argv)

    data_loader = Data_Loader(flags)

    model = Model(flags, data_loader)

    sess = tf.Session()

    # tf.set_random_seed(1234)
    # np.random.seed(1234)

    sess.run(tf.global_variables_initializer())
    sess.run(model.w_embed.assign(data_loader.w_embed))

    if not os.path.exists(flags.ckpt_dir):
        os.makedirs(flags.ckpt_dir)

    if flags.train_test == 'train':
Code Example #28
def train(config):
    summary = SummaryWriter(config.LOG_DIR.log_scalar_train_itr)

    ## inputs
    inputs = {'b_t_1': None, 'b_t': None, 's_t_1': None, 's_t': None}
    inputs = collections.OrderedDict(sorted(inputs.items(),
                                            key=lambda t: t[0]))

    ## model
    print(toGreen('Loading Model...'))
    moduleNetwork = Network().to(device)
    moduleNetwork.apply(weights_init)
    moduleNetwork_gt = Network().to(device)
    print(moduleNetwork)

    ## checkpoint manager
    ckpt_manager = CKPT_Manager(config.LOG_DIR.ckpt, config.mode,
                                config.max_ckpt_num)
    moduleNetwork.load_state_dict(
        torch.load('./network/network-default.pytorch'))
    moduleNetwork_gt.load_state_dict(
        torch.load('./network/network-default.pytorch'))

    ## data loader
    print(toGreen('Loading Data Loader...'))
    data_loader = Data_Loader(config,
                              is_train=True,
                              name='train',
                              thread_num=config.thread_num)
    data_loader_test = Data_Loader(config,
                                   is_train=False,
                                   name="test",
                                   thread_num=config.thread_num)

    data_loader.init_data_loader(inputs)
    data_loader_test.init_data_loader(inputs)

    ## loss, optim
    print(toGreen('Building Loss & Optim...'))
    MSE_sum = torch.nn.MSELoss(reduction='sum')
    MSE_mean = torch.nn.MSELoss()
    optimizer = optim.Adam(moduleNetwork.parameters(),
                           lr=config.lr_init,
                           betas=(config.beta1, 0.999))
    errs = collections.OrderedDict()

    print(toYellow('======== TRAINING START ========='))
    max_epoch = 10000
    itr = 0
    for epoch in np.arange(max_epoch):

        # train
        while True:
            itr_time = time.time()

            inputs, is_end = data_loader.get_feed()
            if is_end: break

            if config.loss == 'image':
                flow_bb = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['b_t'], inputs['b_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_bs = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['b_t'], inputs['s_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_sb = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['s_t'], inputs['b_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_ss = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['s_t'], inputs['s_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)

                with torch.no_grad():
                    flow_ss_gt = torch.nn.functional.interpolate(
                        input=moduleNetwork_gt(inputs['s_t'], inputs['s_t_1']),
                        size=(config.height, config.width),
                        mode='bilinear',
                        align_corners=False)
                    s_t_warped_ss_mask_gt = warp(tensorInput=torch.ones_like(
                        inputs['s_t_1'], device=device),
                                                 tensorFlow=flow_ss_gt)

                s_t_warped_bb = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_bb)
                s_t_warped_bs = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_bs)
                s_t_warped_sb = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_sb)
                s_t_warped_ss = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_ss)

                s_t_warped_bb_mask = warp(tensorInput=torch.ones_like(
                    inputs['s_t_1'], device=device),
                                          tensorFlow=flow_bb)
                s_t_warped_bs_mask = warp(tensorInput=torch.ones_like(
                    inputs['s_t_1'], device=device),
                                          tensorFlow=flow_bs)
                s_t_warped_sb_mask = warp(tensorInput=torch.ones_like(
                    inputs['s_t_1'], device=device),
                                          tensorFlow=flow_sb)
                s_t_warped_ss_mask = warp(tensorInput=torch.ones_like(
                    inputs['s_t_1'], device=device),
                                          tensorFlow=flow_ss)

                optimizer.zero_grad()

                errs['MSE_bb'] = MSE_sum(
                    s_t_warped_bb * s_t_warped_bb_mask,
                    inputs['s_t']) / s_t_warped_bb_mask.sum()
                errs['MSE_bs'] = MSE_sum(
                    s_t_warped_bs * s_t_warped_bs_mask,
                    inputs['s_t']) / s_t_warped_bs_mask.sum()
                errs['MSE_sb'] = MSE_sum(
                    s_t_warped_sb * s_t_warped_sb_mask,
                    inputs['s_t']) / s_t_warped_sb_mask.sum()
                errs['MSE_ss'] = MSE_sum(
                    s_t_warped_ss * s_t_warped_ss_mask,
                    inputs['s_t']) / s_t_warped_ss_mask.sum()

                errs['MSE_bb_mask_shape'] = MSE_mean(s_t_warped_bb_mask,
                                                     s_t_warped_ss_mask_gt)
                errs['MSE_bs_mask_shape'] = MSE_mean(s_t_warped_bs_mask,
                                                     s_t_warped_ss_mask_gt)
                errs['MSE_sb_mask_shape'] = MSE_mean(s_t_warped_sb_mask,
                                                     s_t_warped_ss_mask_gt)
                errs['MSE_ss_mask_shape'] = MSE_mean(s_t_warped_ss_mask,
                                                     s_t_warped_ss_mask_gt)

                errs['total'] = errs['MSE_bb'] + errs['MSE_bs'] + errs['MSE_sb'] + errs['MSE_ss'] \
                              + errs['MSE_bb_mask_shape'] + errs['MSE_bs_mask_shape'] + errs['MSE_sb_mask_shape'] + errs['MSE_ss_mask_shape']

            if config.loss == 'image_ss':
                flow_ss = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['s_t'], inputs['s_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                with torch.no_grad():
                    flow_ss_gt = torch.nn.functional.interpolate(
                        input=moduleNetwork_gt(inputs['s_t'], inputs['s_t_1']),
                        size=(config.height, config.width),
                        mode='bilinear',
                        align_corners=False)
                    s_t_warped_ss_mask_gt = warp(tensorInput=torch.ones_like(
                        inputs['s_t_1'], device=device),
                                                 tensorFlow=flow_ss_gt)

                s_t_warped_ss = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_ss)
                s_t_warped_ss_mask = warp(tensorInput=torch.ones_like(
                    inputs['s_t_1'], device=device),
                                          tensorFlow=flow_ss)

                optimizer.zero_grad()

                errs['MSE_ss'] = MSE_sum(
                    s_t_warped_ss * s_t_warped_ss_mask,
                    inputs['s_t']) / s_t_warped_ss_mask.sum()
                errs['MSE_ss_mask_shape'] = MSE_mean(s_t_warped_ss_mask,
                                                     s_t_warped_ss_mask_gt)
                errs['total'] = errs['MSE_ss'] + errs['MSE_ss_mask_shape']

            if config.loss == 'flow_only':
                flow_bb = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['b_t'], inputs['b_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_bs = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['b_t'], inputs['s_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_sb = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['s_t'], inputs['b_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)
                flow_ss = torch.nn.functional.interpolate(
                    input=moduleNetwork(inputs['s_t'], inputs['s_t_1']),
                    size=(config.height, config.width),
                    mode='bilinear',
                    align_corners=False)

                s_t_warped_ss = warp(tensorInput=inputs['s_t_1'],
                                     tensorFlow=flow_ss)

                with torch.no_grad():
                    flow_ss_gt = torch.nn.functional.interpolate(
                        input=moduleNetwork_gt(inputs['s_t'], inputs['s_t_1']),
                        size=(config.height, config.width),
                        mode='bilinear',
                        align_corners=False)

                optimizer.zero_grad()

                # liteflow_flow_only
                errs['MSE_bb_ss'] = MSE_mean(flow_bb, flow_ss_gt)
                errs['MSE_bs_ss'] = MSE_mean(flow_bs, flow_ss_gt)
                errs['MSE_sb_ss'] = MSE_mean(flow_sb, flow_ss_gt)
                errs['MSE_ss_ss'] = MSE_mean(flow_ss, flow_ss_gt)
                errs['total'] = errs['MSE_bb_ss'] + errs['MSE_bs_ss'] + errs[
                    'MSE_sb_ss'] + errs['MSE_ss_ss']

            errs['total'].backward()
            optimizer.step()

            lr = adjust_learning_rate(optimizer, epoch, config.decay_rate,
                                      config.decay_every, config.lr_init)

            if itr % config.write_log_every_itr == 0:
                summary.add_scalar('loss/loss_mse', errs['total'].item(), itr)
                vutils.save_image(inputs['s_t_1'].detach().cpu(),
                                  '{}/{}_1_input.png'.format(
                                      config.LOG_DIR.sample, itr),
                                  nrow=3,
                                  padding=0,
                                  normalize=False)
                vutils.save_image(s_t_warped_ss.detach().cpu(),
                                  '{}/{}_2_warped_ss.png'.format(
                                      config.LOG_DIR.sample, itr),
                                  nrow=3,
                                  padding=0,
                                  normalize=False)
                vutils.save_image(inputs['s_t'].detach().cpu(),
                                  '{}/{}_3_gt.png'.format(
                                      config.LOG_DIR.sample, itr),
                                  nrow=3,
                                  padding=0,
                                  normalize=False)

                if config.loss == 'image_ss':
                    vutils.save_image(s_t_warped_ss_mask.detach().cpu(),
                                      '{}/{}_4_s_t_warped_ss_mask.png'.format(
                                          config.LOG_DIR.sample, itr),
                                      nrow=3,
                                      padding=0,
                                      normalize=False)
                elif config.loss != 'flow_only':
                    vutils.save_image(s_t_warped_bb_mask.detach().cpu(),
                                      '{}/{}_4_s_t_warped_bb_mask.png'.format(
                                          config.LOG_DIR.sample, itr),
                                      nrow=3,
                                      padding=0,
                                      normalize=False)

            if itr % config.refresh_image_log_every_itr == 0:
                remove_file_end_with(config.LOG_DIR.sample, '*.png')

            print_logs('TRAIN',
                       config.mode,
                       epoch,
                       itr_time,
                       itr,
                       data_loader.num_itr,
                       errs=errs,
                       lr=lr)
            itr += 1

        if epoch % config.write_ckpt_every_epoch == 0:
            ckpt_manager.save_ckpt(moduleNetwork,
                                   epoch,
                                   score=errs['total'].item())
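The warp() helper used throughout this example is not shown. The sketch below is a common backward-warping implementation built on torch.nn.functional.grid_sample; it is an assumption about what warp() does (sample tensorInput at positions displaced by a pixel-space flow), not the project's definition.

import torch
import torch.nn.functional as F

def warp(tensorInput, tensorFlow):
    # Backward warp: sample tensorInput at locations shifted by tensorFlow (pixel offsets).
    b, _, h, w = tensorInput.shape
    # base sampling grid in normalized [-1, 1] coordinates
    xs = torch.linspace(-1.0, 1.0, w, device=tensorInput.device).view(1, 1, 1, w).expand(b, 1, h, w)
    ys = torch.linspace(-1.0, 1.0, h, device=tensorInput.device).view(1, 1, h, 1).expand(b, 1, h, w)
    grid = torch.cat([xs, ys], dim=1)
    # convert the flow from pixel units to normalized units before adding it to the grid
    flow = torch.cat([tensorFlow[:, 0:1] / ((w - 1.0) / 2.0),
                      tensorFlow[:, 1:2] / ((h - 1.0) / 2.0)], dim=1)
    grid = (grid + flow).permute(0, 2, 3, 1)
    return F.grid_sample(tensorInput, grid, mode='bilinear',
                         padding_mode='zeros', align_corners=True)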
Code Example #29
imsize = config.imsize
g_num = config.g_num
z_dim = config.z_dim
label_dim = config.label_dim
g_conv_dim = config.g_conv_dim
d_conv_dim = config.d_conv_dim
lambda_gp = config.lambda_gp

#training parameters
d_iters = config.d_iters
batch_size = config.batch_size
num_workers = config.num_workers

lr_decay = config.lr_decay
beta1 = config.beta1
beta2 = config.beta2

#pretrained
pretrained_model = config.pretrained_model

#misc
train = config.train
parallel = config.parallel
dataset = config.dataset
use_tensorboard = config.use_tensorboard

#paths
root_dir = config.root_dir

test_loader = Data_Loader(train, dataset, imsize, batch_size, shuf=True)
SAGAN_test(test_loader.loader()).test()
Code Example #30
    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.df.iloc[idx, 0])
        img = cv2.imread(img_name)

        if self.transforms:
            img = self.transforms(img)

        if 'negative' in img_name: label = 0
        else: label = 1

        return img, label


if __name__ == "__main__":
    from data_loader import Data_Loader
    ds = Data_Loader(train=False,
                     dataset='mura',
                     mura_class='XR_HAND',
                     mura_type='',
                     image_path='/home/phuc/hdd/datasets',
                     image_size=64,
                     batch_size=64)
    dl, ds = ds.loader()

    for i, data in enumerate(dl):
        print(data)