Example #1
def main():
    prompt="Enter the sequence length and max frames to be considered for feature extraction with spaces..."
    usr_data=raw_input(prompt)
    usr_data=usr_data.split(' ')
    ip=[np.int64(i) for i in usr_data]
    seq_length,max_frames=ip
    prompt="Enter the absolute file path where data sequences need to be created(sequences folder will be created if not present...) \n If default press enter"
    abspath=raw_input(prompt)
    if abspath=='':
        seq_path=os.path.join(os.path.dirname(os.path.dirname(__file__)),'data', 'sequences')
    else:
        # use the supplied location; create the sequences folder if it is missing
        seq_path=os.path.join(abspath,'sequences')
        if not os.path.isdir(seq_path):
            os.mkdir(seq_path)
    data = Dataset(data_length=seq_length,maxframes=max_frames,path=seq_path)
    feature_model=Feature_Extractor()
    for video in data.datafile():
        path = os.path.join('data','sequences',video[2]+'-'+str(seq_length)+'-features')
        if os.path.isfile(path + '.npy'):
            continue
        frames=data.get_frames(video)
        # Skip intermediate frames
        frames=data.rescale_frames(frames)
        seq=[]
        for frame in frames:
            features=feature_model.extract(frame)
            seq.append(features)
        np.save(path,seq)
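The features written above are plain NumPy data saved with np.save, so they can be reloaded directly when assembling training batches. A minimal sketch, assuming the same naming scheme; the video id and sequence length below are made-up placeholders.

import os
import numpy as np

# Hypothetical reload of one saved feature sequence; 'v_001' and 40 are
# illustrative values, not taken from the example above.
seq_length = 40
path = os.path.join('data', 'sequences', 'v_001' + '-' + str(seq_length) + '-features')
features = np.load(path + '.npy')  # roughly (num_frames, feature_dim), if per-frame features share a shape
print(features.shape)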
Example #2
 def __init__(self, batch_size, data_path, model_path, output_path):
     self.epoch = 1
     self.batch_size = batch_size
     self.model = Model(batch_size)
     self.test_dataloader = Dataset(os.path.join(data_path, 'test'))
     self.model_path = model_path
     self.output_path = output_path
Example #3
def get_unique_value():
    # change accordingly based on your data dimension
    alldata = [np.array([])] * 63

    indices = list(range(2613))
    path = train_path
    data = Dataset(indices, path)
    loader = DataLoader(data,
                        batch_size=1,
                        shuffle=False,
                        collate_fn=collate_fn)
    # go over each patient
    for batch, labels, seq_len in loader:
        data = batch[0].cpu().numpy()
        for i in range(0, 63):
            temp = np.unique(data[:, i])
            alldata[i] = np.append(alldata[i], temp)

    indices = list(range(656))
    path = valid_path
    data = Dataset(indices, path)
    loader = DataLoader(data,
                        batch_size=1,
                        shuffle=False,
                        collate_fn=collate_fn)
    for batch, labels, seq_len in loader:
        data = batch[0].cpu().numpy()
        for i in range(0, 63):
            temp = np.unique(data[:, i])
            alldata[i] = np.append(alldata[i], temp)

    for i in range(0, 63):
        alldata[i] = np.unique(alldata[i])

    return alldata
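One plausible downstream use of these per-feature unique values is a dense value-to-index lookup for discretizing the raw measurements; the sketch below is only an illustration of that idea, not code from the project.

def build_value_to_index_maps(alldata):
    """Map every observed raw value of each of the 63 feature columns to a dense integer id."""
    # np.unique already returns sorted values, so enumeration gives stable ids
    return [{v: i for i, v in enumerate(col)} for col in alldata]

# lookup = build_value_to_index_maps(get_unique_value())
# lookup[0][raw_value]  -> integer id of raw_value in feature column 0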
Example #4
    def __init__(self, verbose=False, num_of_inputs=10, depth=10):
        torch.set_printoptions(linewidth=320)
        args = self.get_args()
        self.device = torch.device(
            "cuda:1" if torch.cuda.is_available() else "cpu")
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        batch_size = 10

        self.hw_model = model_generater(inputs=num_of_inputs, depth=depth)

        train_dataset = Dataset(train=True, model=self.hw_model)
        test_dataset = Dataset(train=False, model=self.hw_model)

        self.train_data_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
        self.test_data_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
        self.batch_size = batch_size
        self.verbose = verbose
        self.best_loss = 1e5
        # self.criterion = nn.MSELoss()
        self.criterion = nn.BCELoss()
        torch.manual_seed(42)
Example #5
def get_dataloader(data_path, testdata_path):
    raw_data = pandas.read_csv(data_path)
    testraw_data = pandas.read_csv(testdata_path)
    train_loader = DataLoader(
        Dataset(raw_data.to_numpy(),
                dataDir=os.path.abspath(os.path.dirname(data_path))),
        **config.dataset_params)
    test_loader = DataLoader(
        Dataset(testraw_data.to_numpy(),
                dataDir=os.path.abspath(os.path.dirname(testdata_path))),
        **config.dataset_params)
    return train_loader, test_loader
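config.dataset_params is not shown in this snippet; since it is unpacked straight into DataLoader, it presumably holds ordinary DataLoader keyword arguments. A hypothetical example of what it might contain:

# Hypothetical contents of config.dataset_params (an assumption, not the
# project's actual configuration); any valid DataLoader kwargs would do.
dataset_params = {
    'batch_size': 32,
    'shuffle': True,
    'num_workers': 4,
    'pin_memory': True,
}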
Example #6
def main():
    if args.mode == 'train':
        train_dataset = Dataset(name=args.dataset, mode='train', windows=[24, 32, 40], step=4)
        train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=train_fn,
                                       shuffle=True, num_workers=args.workers, pin_memory=True)
        train(train_loader)
    if args.mode == 'test':
        test_dataset = Dataset(name=args.dataset, mode='test', windows=[24, 32, 40], step=4)
        test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=test_fn,
                                      shuffle=False, num_workers=args.workers, pin_memory=True)
        model = load_model(device)
        test(model, test_loader)
Example #7
def test():
    epoch = 1
    batch_size = args.batch_size
    model_path = args.model_path
    data_path = args.data_path
    idx = 0
    confusion_matrix_total = np.zeros((10, 10))
    data_mode = args.data_mode

    data = Dataset(data_mode, data_path, is_shuffle=False)

    with tf.Graph().as_default():
        x, y, y_onehot, logits = Model(batch_size).build_model('test',
                                                               keep_rate=1)
        prediction = tf.argmax(logits, axis=1)
        accuracy, accuracy_update = tf.metrics.accuracy(labels=y,
                                                        predictions=prediction)
        batch_confusion = tf.confusion_matrix(labels=y,
                                              predictions=prediction,
                                              num_classes=10)

        with tf.Session() as sess:
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            restorer = tf.train.Saver()

            try:
                restorer.restore(sess, tf.train.latest_checkpoint(model_path))
            except:
                print('No model in ' + model_path + ' to restore')
                raise

            while True:
                epoch_now, idx, imgs, labels = data.load_batch(batch_size, idx)
                _, batch_confusion_ = sess.run(
                    [accuracy_update, batch_confusion],
                    feed_dict={
                        x: imgs,
                        y: labels
                    })
                confusion_matrix_total += batch_confusion_
                if epoch_now == epoch:
                    np.save('./confusion.npy', confusion_matrix_total)
                    break

            accuracy_ = sess.run(accuracy)
            return accuracy_
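The confusion matrix saved by test() can be post-processed offline; a minimal sketch, assuming the tf.confusion_matrix convention of rows = ground-truth classes and columns = predictions:

import numpy as np

cm = np.load('./confusion.npy')                               # (10, 10) counts
per_class_acc = np.diag(cm) / np.maximum(cm.sum(axis=1), 1)   # recall per class
overall_acc = np.trace(cm) / cm.sum()
print(per_class_acc, overall_acc)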
Example #8
def get_boundary_value():
    # change accordingly based on your data dimension
    indices = list(range(2613))
    path = train_path
    data = Dataset(indices, path)
    loader = DataLoader(data,
                        batch_size=1,
                        shuffle=False,
                        collate_fn=collate_fn)

    each_max = np.array([]).reshape(0, 63)
    each_min = np.array([]).reshape(0, 63)
    for batch, labels, seq_len in loader:
        data = batch[0].cpu().numpy()
        arr_max = np.amax(data, axis=0).reshape(1, 63)
        arr_min = np.amin(data, axis=0).reshape(1, 63)
        each_max = np.concatenate((each_max, arr_max), axis=0)
        each_min = np.concatenate((each_min, arr_min), axis=0)

    train_all_max = np.amax(each_max, axis=0).reshape(1, 63)
    train_all_min = np.amin(each_min, axis=0).reshape(1, 63)

    indices = list(range(656))
    path = valid_path
    data = Dataset(indices, path)
    loader = DataLoader(data,
                        batch_size=1,
                        shuffle=False,
                        collate_fn=collate_fn)

    each_max = np.array([]).reshape(0, 63)
    each_min = np.array([]).reshape(0, 63)
    for batch, labels, seq_len in loader:
        data = batch[0].cpu().numpy()
        arr_max = np.amax(data, axis=0).reshape(1, 63)
        arr_min = np.amin(data, axis=0).reshape(1, 63)
        each_max = np.concatenate((each_max, arr_max), axis=0)
        each_min = np.concatenate((each_min, arr_min), axis=0)

    val_all_max = np.amax(each_max, axis=0).reshape(1, 63)
    val_all_min = np.amin(each_min, axis=0).reshape(1, 63)

    all_max = np.amax(np.concatenate((val_all_max, train_all_max), axis=0),
                      axis=0).reshape(1, 63)
    # element-wise minimum across the train and validation minima
    all_min = np.amin(np.concatenate((val_all_min, train_all_min), axis=0),
                      axis=0).reshape(1, 63)

    A = np.concatenate((all_min, all_max), axis=0)
    return A.transpose()
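The (63, 2) array returned above, with per-feature minima in column 0 and maxima in column 1, is the natural input to min-max scaling; this is a minimal sketch under that assumption, not the project's own normalization code.

import numpy as np

def min_max_normalize(batch, bounds):
    """Scale each of the 63 feature columns of `batch` into [0, 1]."""
    lo = bounds[:, 0]                        # per-feature minimum
    hi = bounds[:, 1]                        # per-feature maximum
    span = np.where(hi > lo, hi - lo, 1.0)   # guard against zero range
    return (batch - lo) / span

# bounds = get_boundary_value()
# normalized = min_max_normalize(raw_batch, bounds)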
Example #9
def main():
    if args.mode == 'train':
        train_dataset = Dataset(mode='train')
        train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=train_fn,
                                       shuffle=True, num_workers=args.workers, pin_memory=True)
        val_dataset = Dataset(mode='test')
        val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, collate_fn=test_fn,
                                     shuffle=False, num_workers=args.workers, pin_memory=True)
        train(train_loader, val_loader)
    if args.mode == 'test':
        test_dataset = Dataset(mode='test')
        test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, collate_fn=test_fn,
                                      shuffle=False, num_workers=args.workers, pin_memory=True)
        model = load_model(device)
        test(model, test_loader)
Example #10
    def fit(self, X, y, test_X, test_y, train_mean, train_std):
        # plot_stroke(utils.un_normalize_data(X[:1], train_mean, train_std)[0])
        # ll
        # optimizer = torch.optim.RMSprop(self.model.parameters())
        # optimizer = torch.optim.Adam(self.model.parameters())
        # bce = nn.BCELoss(reduction='none')

        for epoch in range(self.epochs):
            training_set = Dataset(X, y, self.bs)
            while training_set.last_batch():
                # Transfer to GPU
                X_batch, y_batch, y_mask_batch, lens_batch = training_set.next_batch(
                )
                X_batch, y_batch, y_mask_batch, lens_batch = X_batch.to(
                    self.device), y_batch.to(self.device), y_mask_batch.to(
                        self.device), lens_batch.to(self.device)
                # print(training_set.cur_idx)
                e, ro, pi, mu, sigma, _, _ = self.model(X_batch, lens_batch)

                N = self.get_likelihood(e, ro, pi, mu, sigma, y_batch)
                N = N.reshape(-1)

                y_ber_truth = y_batch[:, :, 0].reshape(-1).float()

                e = e.reshape(-1)

                y_mask_batch = y_mask_batch.reshape(-1)

                ber_loss = self.bce(e, y_ber_truth)
                loss_sum = torch.sum(N * y_mask_batch) + torch.sum(
                    ber_loss * y_mask_batch)
                loss = loss_sum / torch.sum(lens_batch)
                print(loss, loss_sum)

                self.optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), 10)

                self.optimizer.step()

            if epoch % 5 == 0:
                to_stop = self.validate(test_X, test_y, epoch)
                self.create_stroke()
                if to_stop:
                    break
            break  # note: exits after a single epoch
Example #11
def merge_labels_to_ckpt(ck_path: str, train_file: str):
    '''Merge labels to a checkpoint file.

    Args:
        ck_path(str): path to checkpoint file
        train_file(str): path to train set index file, eg. train.csv

    Return:
        This function will create a {ck_path}_patched.pth file.
    '''
    # load model
    print('Loading checkpoint')
    ckpt = torch.load(ck_path)

    # load train files
    print('Loading dataset')
    raw_data = pandas.read_csv(train_file)
    train_set = Dataset(raw_data.to_numpy())

    # patch file name
    print('Patching')
    patch_path = ck_path.replace('.pth', '') + '_patched.pth'

    ck_dict = {'label_map': train_set.labels}
    names = ['epoch', 'model_state_dict', 'optimizer_state_dict']
    for name in names:
        ck_dict[name] = ckpt[name]

    torch.save(ck_dict, patch_path)
    print('Patched checkpoint has been saved to {}'.format(patch_path))
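A hypothetical call, just to illustrate the '*_patched.pth' naming convention; the paths below are made up.

import torch

merge_labels_to_ckpt('checkpoints/model.pth', 'data/train.csv')
patched = torch.load('checkpoints/model_patched.pth')
print(patched.keys())  # label_map, epoch, model_state_dict, optimizer_state_dict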
Example #12
def main(args):
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    net = UNet(n_channels=3, n_classes=1)

    checkpoint = flow.load(args.pretrained_path)
    net.load_state_dict(checkpoint)

    net.to("cuda")

    x_test_dir, y_test_dir = get_datadir_path(args, split="test")

    test_dataset = Dataset(
        x_test_dir, y_test_dir, augmentation=get_test_augmentation(),
    )

    print("Begin Testing...")
    for i, (image, mask) in enumerate(tqdm(test_dataset)):
        show_image = image
        with flow.no_grad():
            image = image / 255.0
            image = image.astype(np.float32)
            image = flow.tensor(image, dtype=flow.float32)
            image = image.permute(2, 0, 1)
            image = image.to("cuda")

            pred = net(image.unsqueeze(0).to("cuda"))
            pred = pred.numpy()
            pred = pred > 0.5
        save_picture_name = os.path.join(args.save_path, "test_image_" + str(i))
        visualize(
            save_picture_name, image=show_image, GT=mask[0, :, :], Pred=pred[0, 0, :, :]
        )
Example #13
def discrete_main(args):
    if not args.discrete:
        return
    HPS = Hps(args.hps_path)
    hps = HPS.get_tuple()
    model_path = path.join(args.ckpt_dir, args.load_test_model_name)
    dataset = Dataset(args.dataset_path, args.index_path, seg_len=hps.seg_len)
    data_loader = DataLoader(dataset, hps.batch_size)
    trainer = Trainer(hps,
                      data_loader,
                      args.targeted_G,
                      args.one_hot,
                      binary_output=False,
                      binary_ver=False)
    trainer.load_model(path.join(args.ckpt_dir, args.load_train_model_name),
                       model_all=True)
    data = [d.unsqueeze(0) for d in dataset]
    data = [trainer.permute_data(d)[1] for d in data]
    encoded = [trainer.encode_step(x) for x in data]
    kmeans, look_up = clustering(encoded, n_clusters=args.n_clusters)
    test(trainer, args.dataset_path, args.speaker2id_path, args.result_dir,
         args.enc_only, args.flag)
    finetune_discrete_decoder(trainer, look_up, model_path)
    discrete_test(trainer, args.dataset_path, args.speaker2id_path,
                  'discrete_' + args.result_dir, args.enc_only, args.flag)
Example #14
    def trainIter(self, x_train, y_train, epochs, num_workers=6):
        criterion = CustomLoss(self.alpha, self.beta)
        training_set = Dataset(x_train, y_train)
        training_generator = data.DataLoader(training_set,
                    batch_size=self.batch_size, shuffle=True,
                    collate_fn=self.pad_and_sort_batch,
                    drop_last=True, num_workers=num_workers)
        
        for epoch in range(self.start, epochs+1):
            loss = 0
            for mini_batches in tqdm(training_generator):
                for i, (input_tensor, input_length, output_tensor, output_length) in enumerate(mini_batches):
                    # allocate tensors to GPU
                    input_tensor = input_tensor.to(device)
                    output_tensor = output_tensor.to(device)
                    loss += self.train(input_tensor, output_tensor,
                                        input_length, output_length, criterion)
            print("epoch {} average minibatch loss: {:.6f}".format(epoch, loss/len(training_generator)))

            # save model
            if epoch % 10 == 0:
                self.save_model(self.encoder, self.decoder, self.enc_optimizer,
                                self.dec_optimizer, epoch,
                                "./models/seq2seq_{}_{}.tar".format(epoch, loss),
                                loss/len(training_generator))
                print("trained model saved.")
            self.loss_list.append(loss)
            self.loss_graph(self.loss_list) #save loss 
Example #15
    def validate(self, test_x, test_y, epoch_id):
        '''
        Print the validation loss for the given epoch and implement early stopping.
        '''
        self.model.eval()
        test_set = Dataset(test_x, test_y, self.bs)
        total_loss = 0.0
        while test_set.last_batch():
            # Transfer to GPU
            X_batch, y_batch, y_mask_batch, lens_batch = test_set.next_batch()
            X_batch, y_batch, y_mask_batch, lens_batch = X_batch.to(
                self.device), y_batch.to(self.device), y_mask_batch.to(
                    self.device), lens_batch.to(self.device)
            # print(training_set.cur_idx)
            e, ro, pi, mu, sigma, _ = self.model(X_batch, lens_batch)
            # import pdb; pdb.set_trace()

            N = self.get_likelihood(e, ro, pi, mu, sigma, y_batch)
            N = N.reshape(-1)

            y_ber_truth = y_batch[:, :, 0].reshape(-1).float()

            e = e.reshape(-1)

            y_mask_batch = y_mask_batch.reshape(-1)

            ber_loss = self.bce(e, y_ber_truth)
            loss_sum = torch.sum(N * y_mask_batch) + torch.sum(
                ber_loss * y_mask_batch)
            loss = loss_sum / torch.sum(lens_batch)
            total_loss += list(loss.cpu().data.numpy().flatten())[0]
        self.model.train()
        print("Validation loss after ", epoch_id, " epochs : ", loss.data)
        if total_loss < self.validation_score:
            self.validation_score = total_loss
            self.valid_low_count = 0
            torch.save(self.model.state_dict(),
                       self.path + str(epoch_id) + ".pt")
        else:
            self.valid_low_count += 1
            print("Early stopping count increased from ",
                  self.valid_low_count - 1, ' to ', self.valid_low_count)

        if self.valid_low_count >= self.valid_low_count_max:
            return True
        else:
            return False
Example #16
def train():
	idx = 0
	batch_size = args.batch_size
	learning_rate_start = 0.001
	epoch = args.epoch
	model_path = args.model_path
	data_path = args.data_path
	is_restore = args.is_restore

	data = Dataset('train', data_path)

	with tf.Graph().as_default():
		x, y, y_onehot, logits = Model(batch_size).build_model('train', keep_rate=0.5)
		cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot, logits=logits, name='softmax_loss'))
		reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
		loss = tf.reduce_sum(cross_entropy) + tf.reduce_sum(reg_loss)
		global_step = tf.Variable(0, trainable=False)
		learning_rate = tf.train.exponential_decay(learning_rate_start, global_step, 10000, 0.1)
		optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
		train_op = optimizer.minimize(loss, global_step=global_step)
		tf.summary.scalar('loss', loss)
		summary = tf.summary.merge_all()

		with tf.Session() as sess:
			summary_writer = tf.summary.FileWriter('./graph', sess.graph)
			sess.run(tf.global_variables_initializer())
			saver = tf.train.Saver(max_to_keep=5)
			if is_restore:
				try:
					saver.restore(sess, tf.train.latest_checkpoint(model_path))
				except:
					print('No model in ' + model_path + ' to restore')
					raise

			print('Start training...')
			while True:
				epoch_now, idx, imgs, labels = data.load_batch(batch_size, idx)
				loss_, _, step, summary_ = sess.run([loss, train_op, global_step, summary], feed_dict={x: imgs, y:labels})
				summary_writer.add_summary(summary_, global_step=step)
				if step%20 == 0:
					print('Epoch: {0}, Step: {1}, Loss: {2}' .format(epoch_now, step, loss_)) 
				if step%200 == 0:
					saver.save(sess, os.path.join(model_path, 'model.ckpt'), global_step=step)
				if epoch_now == epoch:
					print('Finish training ...')
					break
Example #17
class Test:
    def __init__(self, batch_size, data_path, model_path, output_path):
        self.epoch = 1
        self.batch_size = batch_size
        self.model = Model(batch_size)
        self.test_dataloader = Dataset(os.path.join(data_path, 'test'))
        self.model_path = model_path
        self.output_path = output_path
        
    def test(self):
        idx_test = 0
        block_mask, inverse_block_mask = create_block_mask(shape=[self.batch_size, 96, 96, 3])
        
        with tf.device('/gpu:0'):
            with tf.Graph().as_default():
                #input
                random = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 96, 96, 3])
                y = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 96, 96, 3])
                mask = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 96, 96, 3])
                inverse_mask = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 96, 96, 3])
                x = y*mask + random*inverse_mask
                
                #generator
                G_output_sample = self.model.generator(x)
                
                config = tf.ConfigProto()
                config.gpu_options.per_process_gpu_memory_fraction = 0.6
                
                with tf.Session(config=config) as sess:
                    sess.run(tf.global_variables_initializer())
                    restorer = tf.train.Saver()
                    try:
                        restorer.restore(sess, tf.train.latest_checkpoint(self.model_path))
                        print('Load model Success')
                    except:
                        print('No model to restore ...')
                        raise
                    
                    print('Start testing ...')
                    count = 0
                    while True:
                        epoch, idx_test, y_batch = self.test_dataloader.load_batch(self.batch_size, idx_test, size=[96, 96])
                        random_noise = np.random.normal(size=y_batch.shape)
                        G_output_out, x_batch = sess.run([G_output_sample, x], feed_dict={random:random_noise, y:y_batch, mask:block_mask, inverse_mask:inverse_block_mask})
                        
                        #visualize
                        G_output_out = G_output_out*inverse_block_mask + y_batch*block_mask
                        G_output_out = np.squeeze(G_output_out)
                        print('Saving image {0}' .format(str(count)+'-noiseX-test'))
                        plot(x_batch, name=str(count)+'-noiseX-test' ,output_path=self.output_path)
                        print('Saving image {0}' .format(str(count)+'-realX-test'))
                        plot(y_batch, name=str(count)+'-realX-test' ,output_path=self.output_path)
                        print('Saving image {0}' .format(str(count)+'-fakeG-test'))
                        plot(G_output_out, name=str(count)+'-fakeG-test' ,output_path=self.output_path)
                        count += 1
                        if epoch == self.epoch:
                            print('Finish ...')
                            break
Example #18
def main():
    ds = Dataset('imdb')
    params = {
        'batch_size': 67,
        'shuffle': True,
        'num_workers': 8,
        'collate_fn': collate_fn
    }
    epochs = 4
    lr = 0.01
    tbptt_steps = 256
    training_generator = data.DataLoader(ds, **params)
    model = CharRNN(input_size=ds.encoder.get_vocab_size(),
                    embedding_size=8,
                    hidden_size=128,
                    output_size=ds.encoder.get_vocab_size(),
                    no_sentiments=3,
                    dense_size=32,
                    padding_idx=ds.encoder.get_id(PADDING_TOKEN),
                    n_layers=1)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    step_no = 0
    for epoch in range(epochs):
        print('Epoch: ', epoch)
        for x_i, y_i, l_i in training_generator:
            model.reset_intermediate_vars()
            step_no += 1
            print(x_i.size())
            batch_loss = 0
            for step in range(l_i[0] // tbptt_steps +
                              (l_i[0] % tbptt_steps != 0)):
                von = tbptt_steps * step
                bis = min(tbptt_steps * (step + 1), l_i[0])
                out = model(x_i[:, von:bis])
                if step % 25 == 0:
                    print(model.attn[0].detach().numpy(),
                          model.attn[-1].detach().numpy())
                loss = model.loss(out, y_i, l_i, von, bis)
                batch_loss += loss
                optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 1.5)
                # note: this manual SGD-style update is applied in addition to
                # the Adam optimizer.step() below, so parameters are updated twice
                for p in model.parameters():
                    p.data.add_(-lr, p.grad.data)
                optimizer.step()

                model.detach_intermediate_vars()
            print('Total loss for this batch: ', batch_loss.item())
            if step_no % 30 == 1:
                gen_sample, sentis = model.generate_text(
                    ds.encoder, 'T', 200, 0.7)
                print_colored_text(gen_sample, sentis, ds.encoder)
                # Print an example with sentiments
                print_colored_text(x_i[-1].data.numpy(),
                                   get_sentiments(model, x_i[-1], 0.7),
                                   ds.encoder)
Example #19
def run_training(args):
	config = tf.ConfigProto()
	config.gpu_options.allow_growth = True
	session = tf.Session(config=config)
	np.random.seed(2019)
	tf.compat.v1.set_random_seed(2019)
	# df = pd.read_csv('./data/reddit_nodes_weighted_full.csv', header=None, names=['source', 'target', 'weight'])
	df = pd.read_csv(args["directory"]+'/'+args["graph_file"], header=None, names=['source', 'target', 'weight'])
	G = nx.from_pandas_edgelist(df, edge_attr='weight', create_using=nx.Graph())
	full_filepath = nn_filepath(args)
	
	filepath = args["directory"]+'/'+full_filepath+'/checkpoint_{epoch:02d}-{val_loss:.5f}.hdf5'
	if os.path.isdir(args["directory"]+'/'+full_filepath): return
	
	os.mkdir(args["directory"]+'/'+full_filepath)

	with open(args["directory"]+'/embeddings/'+args["embedding_file"], 'r') as fp:
		embeddings = json.load(fp)

	data = Dataset(embeddings=embeddings, G=G, directory=args["directory"], graph_file=args["graph_file"], embedding_dim=args["embedding_dim"])

	classifier = Classifier(dense_classifier=args["dense_classifier"],
							embedding_dim=args["embedding_dim"],
							layers=args["layers"],
							dropout=args["dropout"],
							epochs=args["epochs"],
							validation_split=args["validation_split"],
							batch_size=args["batch_size"])

	print('about to get train data')
	train_data = data.train_data()
	print('got train data')
	test_data = data.test_data()
	print('got test data')

	
	classifier.train(filepath=filepath,
					patience=args["patience"], 
					validation_split=args["validation_split"], 
					batch_size=args["batch_size"], 
					epochs=args["epochs"], 
					train_data=train_data, 
					test_data=test_data)
Example #20
def main():
    train_dataset = Dataset()
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   collate_fn=collect_fn,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=True)
    neg_gen = negative_batch_generator(args.max_len, args.batch_size,
                                       args.neg_size)
    train(train_loader, neg_gen)
Example #21
    def create_generator(self):
        # Generators
        training_set = Dataset('train', self.params_dataset_complete)
        training_generator = torch.utils.data.DataLoader(
            training_set, **self.params_data_generation)

        test_set = Dataset('test', self.params_dataset_complete)
        test_generator = torch.utils.data.DataLoader(
            test_set, **self.params_data_generation)

        val_set = Dataset('val', self.params_dataset_complete)
        val_generator = torch.utils.data.DataLoader(
            val_set, **self.params_data_generation)

        dict_generators = {
            'training_generator': training_generator,
            'test_generator': test_generator,
            'val_generator': val_generator
        }

        return dict_generators
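A brief usage sketch of the returned dictionary; `trainer` stands for a hypothetical instance of the class that defines create_generator, and the (batch, labels) unpacking is an assumption about what the Dataset yields.

generators = trainer.create_generator()
for batch, labels in generators['training_generator']:
    pass  # forward/backward pass would go here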
Example #22
def main():
    torch.set_printoptions(linewidth=320)
    args = get_args()
    print(args)

    # set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    patches_train_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Patches/train'
    patches_test_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Patches/test'

    train_filelist, test_filelist = get_patches_filelist(patches_train_dir, patches_test_dir)
    train_label_filelist, test_label_filelist = train_filelist, test_filelist

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    train_dataset = Dataset(image_filelist=train_filelist, label_filelist=train_label_filelist, train=True,
                            pickle_name='train_patches.pickle', transforms=transform, target_mode=args.target_mode,
                            label_loader=patch_read)
    test_dataset = Dataset(image_filelist=test_filelist, label_filelist=test_label_filelist, train=False,
                           pickle_name='test_patches.pickle', transforms=transform, target_mode=args.target_mode,
                           label_loader=patch_read)

    train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=512, shuffle=True, num_workers=1)
    test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=512, shuffle=True, num_workers=1)

    net = Net(device=device, mode=args.target, target_mode=args.target_mode)
    model_name = 'DepthNet'

    if args.load_model:
        load_model(net, device, fullpath=args.load_path)

    if args.train:
        train_loss, val_loss = train(net=net, train_data_loader=train_data_loader, test_data_loader=test_data_loader, device=device, num_epochs=args.epochs)
        _plot_fig(train_loss, val_loss, model_name+'-Losses')
        save_model(net, epoch=args.epochs, experiment_name=args.experiment_name)
Example #23
def extract_features(seq_length,max_frames,abspath=None):
    """ This function is to ease the training process to analyze the performance of the algorithm for various hyper parameters."""
    if abspath=='' or abspath is None:
        seq_path=os.path.join(os.path.dirname(os.path.dirname(__file__)),'data', 'sequences')
    else:
        # use the supplied location; create the sequences folder if it is missing
        seq_path=os.path.join(abspath,'sequences')
        if not os.path.isdir(seq_path):
            os.mkdir(seq_path)
    data = Dataset(data_length=seq_length,maxframes=max_frames,path=seq_path)
    feature_model=Feature_Extractor()
    for video in data.datafile():
        path = os.path.join('data','sequences',video[2]+'-'+str(seq_length)+'-features')
        if os.path.isfile(path + '.npy'):
            continue
        frames=data.get_frames(video)
        # Skip intermediate frames
        frames=data.rescale_frames(frames)
        seq=[]
        for frame in frames:
            features=feature_model.extract(frame)
            seq.append(features)
        np.save(path,seq)
Example #24
def _eval_fromVideo(model, device, video_path: str, labels=[]) -> list:
    """Inference the model and return the labels.

    Args:
        checkpoint(str): The checkpoint where the model restore from.
        path(str): The path of videos.
        labels(list): Labels of videos.

    Returns:
        A list of labels of the videos.
    """
    if not os.path.exists(video_path):
        raise ValueError('Invalid path! which is: {}'.format(video_path))

    # Do inference
    pred_labels = []
    video_names = glob.glob(os.path.join(video_path,
                                         '20180802-094306_912.mp4'))
    with torch.no_grad():
        for video in tqdm(video_names, desc='Inferencing'):
            # read images from video
            video_fd = cv2.VideoCapture(video)  # glob already returns the full path
            total = int(video_fd.get(cv2.CAP_PROP_FRAME_COUNT))
            for i in range(total // 30):
                images = load_imgs_from_video(video_fd)
                # apply transform
                images = [Dataset.transform(None, img) for img in images]
                # stack to tensor, batch size = 1
                images = torch.stack(images, dim=0).unsqueeze(0)
                # do inference
                images = images.to(device)
                pred_y = model(images)  # type: torch.Tensor
                y_ordinalSoftmax = ordinal_softmax(pred_y)
                probs_df = pd.DataFrame(y_ordinalSoftmax.cpu().data.numpy())

                probs_df.head()
                labelstemp = probs_df.idxmax(axis=1)

                pred_labels.append([video + "-" + str(i), labelstemp.values])
                print(pred_labels[-1])

    if len(labels) > 0:
        acc = accuracy_score(pred_labels, labels)
        print('Accuracy: %0.2f' % acc)

    # Save results
    pandas.DataFrame(pred_labels).to_csv('result.csv', index=False)
    print('Results have been saved to {}'.format('result.csv'))

    return pred_labels
Example #25
def main():
    # Configuration
    config = Config()
    # Device
    device = torch.device(config.device)
    # Model
    model = AAE(config).train().to(device)
    # Optimizer
    optimizer = torch.optim.Adam(list(model.encoder.parameters()) +
                                 list(model.decoder.parameters()),
                                 lr=config.lr)
    # Data
    dataloader = Dataset(sys.argv[1], sep='\t')

    #### TRAIN
    train(model, optimizer, dataloader, config, device)
Example #26
def draw_auc():
    fpr = dict()
    tpr = dict()
    roc_auc = dict()

    args = parse_args()
    data_path = args.data_path
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    raw_data = pandas.read_csv(os.path.join(data_path, '%s.csv' % 'test'))
    dataloader = DataLoader(Dataset(raw_data.to_numpy()), **config.dataset_params)
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    model = Baseline()
    device_count = torch.cuda.device_count()
    if device_count > 1:
        print('Training with {} GPUs'.format(device_count))
        model = nn.DataParallel(model)
    model.to(device)
    ckpt = {}
    # Resume training from a checkpoint
    if args.restore_from is not None:
        ckpt = torch.load(args.restore_from)
        # model.load_state_dict(ckpt['net'])
        model.load_state_dict(ckpt['model_state_dict'])
        print('Model is loaded from %s' % (args.restore_from))

    y_test, y_score = test(model, dataloader, 'baseline', device)

    for i in range(2):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
        plt.figure()
        lw = 2
        # plot the ROC curve and AUC of class i
        plt.plot(fpr[i], tpr[i], color='darkorange',
                 lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[i])
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.show()
Example #27
def _eval_fromCSV(model, device, csv_path: str):
    raw_data = pandas.read_csv(csv_path)
    dataloaders = DataLoader(Dataset(raw_data.to_numpy(),
                                     dataDir=os.path.abspath(
                                         os.path.dirname(csv_path))),
                             batch_size=1,
                             shuffle=False)
    model.eval()

    print('Size of Test Set: ', len(dataloaders.dataset))

    # Prepare to evaluate model performance on the test set
    test_loss = 0
    y_gd = []
    y_pred = []

    # No backpropagation needed, so disable gradient tracking
    with torch.no_grad():
        for X, y in tqdm(dataloaders, desc='Validating'):
            # Run predictions on the test-set data
            X, y = X.to(device), y.to(device)
            y_ = model(X)

            # Collect predictions and ground truth
            y_ordinalSoftmax = ordinal_softmax(y_)
            probs_df = pd.DataFrame(y_ordinalSoftmax.cpu().data.numpy())

            probs_df.head()
            labels = probs_df.idxmax(axis=1)
            y_gd += y.cpu().numpy().tolist()
            # y_pred += y_.cpu().numpy().tolist()
            y_pred += labels.to_numpy().tolist()
    plt.plot(y_pred, label='current modeled')
    plt.plot(y_gd, label='accessed')
    plt.show()
    np.save(
        'predicted' + os.path.splitext(os.path.basename(csv_path))[0] + '.npy',
        y_pred)
    np.save('y_gd' + os.path.splitext(os.path.basename(csv_path))[0] + '.npy',
            y_gd)
    return y_pred, y_gd
Example #28
def get_valid_weights():
    
    val_data = Dataset(valid_indices, valid_path)
    val_loader = DataLoader(val_data, batch_size=1024, shuffle=True, collate_fn=collate_fn)

    pos_l = np.zeros((10))
    neg_l = np.zeros((10))
    weights = np.zeros((10))

    for batch, labels, seq_len in val_loader:
        neg_l += torch.sum(torch.sum((labels == 0), dim=0),dim=0).cpu().numpy()
        pos_l += torch.sum(torch.sum((labels == 1), dim=0),dim=0).cpu().numpy()
        t_weights = torch.sum(torch.sum((labels == 0), dim=0),dim=0).cpu().numpy() * v_neg_weights.cpu().numpy()
        t_weights += torch.sum(torch.sum((labels == 1), dim=0),dim=0).cpu().numpy()
        weights += t_weights / ((torch.sum(labels == 1).cpu().numpy()+ torch.sum(labels == 0).cpu().numpy())/10)

    neg_weights = pos_l/neg_l
    weights /= len(val_loader)
    multiplier = (1/(weights / max(weights)))
    multiplier = (1/np.mean(weights * multiplier)) * multiplier
    return neg_weights, multiplier
Example #29
 def __init__(self,
              epoch,
              batch_size,
              data_path,
              model_path,
              output_path,
              graph_path,
              restore=False):
     self.batch_size = batch_size
     self.model = Model(batch_size)
     self.train_dataloader = Dataset(os.path.join(data_path, 'train'))
     self.train_test_dataloader = Dataset(os.path.join(data_path, 'train'))
     self.test_dataloader = Dataset(os.path.join(data_path, 'test'))
     self.epoch = epoch
     self.model_path = model_path
     self.output_path = output_path
     self.graph_path = graph_path
     self.restore = restore
Example #30
        params=list(net.parameters())+list(G2.parameters())+list(G3.parameters())
        optimizer=torch.optim.Adam(params,lr=lr,betas=(0.5,0.999))

    vgg_model=vgg16(pretrained=True).features[:16]
    vgg_model=vgg_model.to(device)
    for param in vgg_model.parameters():
        param.requires_grad=False
    loss_network=LossNetwork(vgg_model)
    loss_network.eval()
    loss_lap=Lap()
    start_epoch=0
    loss_rec1=nn.SmoothL1Loss()
    loss_rec2=nn.MSELoss()
    num=0
    avg=nn.AvgPool2d(3,stride=2,padding=1)
    train_data=Dataset("./dataset/RICE1/")
    train_dataloader=DataLoader(train_data,batch_size=batch_size,shuffle=True,num_workers=4,pin_memory=True)
    for epoch in range(start_epoch,EPOCH):
        psnr_list=[]
        start_time=time.time()
        adjust_learning_rate(optimizer,epoch)

        for batch_id,train_data in enumerate(train_dataloader):
            cloud,gt=train_data
            optimizer.zero_grad()
            cloud=cloud.to(device)
            gt=gt.to(device)
            gt_quarter_1=F.interpolate(gt,scale_factor=0.25,recompute_scale_factor=True)
            gt_quarter_2=F.interpolate(gt,scale_factor=0.25,recompute_scale_factor=True)

            if train_phrase==1: