Code Example #1
File: data_processer.py Project: Xiongzd/Xiongzd
    def data_processer(self):
        configs = json.load(open('config.json', 'r'))

        ## Load the data
        data = pd.read_excel('features.xlsx')
        all_data = data.head(500)

        ## Data preprocessing
        scaler = MinMaxScaler(feature_range=(0, 1))  # scale values into the 0-1 range
        sel_col = configs['data']['columns']
        all_data = all_data[sel_col]

        # scale each feature of the dataset into the 0-1 range
        for col in sel_col[1:]:
            all_data[col] = scaler.fit_transform(all_data[col].values.reshape(
                -1, 1))

        # print(all_data)

        all_data = torch.tensor(np.array(all_data))

        length = len(all_data)
        tr_val_slip = int(configs['data']['train_test_split'] *
                          length)  # use the first 80% of the data as the training set

        x = torch.zeros(length - configs['data']['sequence_length'],
                        configs['data']['sequence_length'],
                        configs['model']['layers']['input_dim'])
        y = torch.zeros(length - configs['data']['sequence_length'],
                        configs['data']['sequence_length'],
                        configs['model']['layers']['output_dim'])

        for i in range(0, length - configs['data']['sequence_length'] - 1):
            x[i] = all_data[i:i + configs['data']['sequence_length']]
            y[i] = all_data[i + 1:i + configs['data']['sequence_length'] +
                            1][:, 0].reshape(-1, 1)

        train_x = x[0:tr_val_slip]
        train_y = y[0:tr_val_slip]
        valid_x = x[tr_val_slip:]
        valid_y = y[tr_val_slip:]

        train_set = DataSet(train_x, train_y)
        valid_set = DataSet(valid_x, valid_y)
        train_loader = DataLoader(train_set,
                                  batch_size=configs['training']['batch_size'],
                                  shuffle=False)
        valid_loader = DataLoader(valid_set,
                                  batch_size=configs['training']['batch_size'],
                                  shuffle=False)

        return train_loader, valid_loader
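
A minimal consumption sketch for the loaders returned above (not part of the original project): it assumes data_processer belongs to some owner class, here called DataProcessor as a placeholder, and that the custom DataSet yields (x, y) tensor pairs.

# Hypothetical usage sketch; DataProcessor stands in for whatever class owns data_processer.
processor = DataProcessor()
train_loader, valid_loader = processor.data_processer()
for batch_x, batch_y in train_loader:
    # expected shapes: (batch_size, sequence_length, input_dim) and (batch_size, sequence_length, output_dim)
    print(batch_x.shape, batch_y.shape)
    break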
Code Example #2
File: utils.py Project: lundha/Active-Learning
def load_data_pool(train=False, arg=None) -> DataSet:
    '''
    Load the data pool if it does not exist, else return it.
    '''
    if train:
        data_dir = arg['data_dir'] + "train/"
    else:
        data_dir = arg['data_dir'] + "test/"

    print(data_dir)
    num_classes = arg['num_classes']
    file_ending = arg['file_ending']
    print(file_ending)
    print(num_classes)

    header_file = data_dir + "header.tfl.txt"
    filename = data_dir + "image_set.data"
    
    
    try:
        dataset = DataSet(data_dir=data_dir, header_file=header_file, csv_file=filename, file_ending=file_ending,
                                    num_classes=num_classes, train=train)
    except Exception as e:
        print(f"Could not load dataset, exception: {e}")
        raise  # re-raise so the return below never references an undefined name

    return dataset
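
A small usage sketch (assumed, not taken from the repository): the dictionary keys mirror the ones load_data_pool reads above, and the concrete values are placeholders.

# Hypothetical argument dictionary; values are placeholders.
arg = {
    'data_dir': './plankton/',   # expects train/ and test/ subdirectories
    'num_classes': 10,
    'file_ending': '.jpg',
}
train_pool = load_data_pool(train=True, arg=arg)
test_pool = load_data_pool(train=False, arg=arg)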
Code Example #3
File: demo_ml_10M.py Project: qingquansong/CVRCF
def testing_phase(model, sess, train_all, pred_all, mark_u_time_end, mark_v_time_end, hidden_U, hidden_V):
    MSE, RMSE, MSE1, RMSE1, N, N1 = [], [], [], [], [], []
    test_data = DataSet('data/test.txt', args, mark_u_time_end, mark_v_time_end)
    g = 0
    while test_data.finish != 1:
        g += 1

        # Get Data Batch
        data, batch_all = get_batch_data(test_data)

        # Test
        test_one_step(batch_all, model, sess, pred_all, hidden_U, hidden_V, MSE, RMSE, MSE1, RMSE1, N, N1)

        # Updating
        sp_u_indices, sp_u_shape, sp_u_val, \
        sp_u_indices_res, sp_u_shape_res, sp_u_val_res, \
        sp_v_indices, sp_v_shape, sp_v_val, \
        sp_v_indices_res, sp_v_shape_res, sp_v_val_res, \
        inputs_u_idx, inputs_v_idx, \
        inputs_idx_pair, all_data, \
        siz, mark_new_user_movie, \
        re_sp_u_ids, u_seg_ids, \
        re_sp_v_ids, v_seg_ids = batch_all


        loss0, loss = 0, 100
        t = 0
        pbar = tqdm(total=args.test_max_t)
        while abs(loss - loss0) / abs(loss) > 1e-2 and t < args.test_max_t:
            t += 1
            pbar.update(1)
            _, loss, hidden_us, hidden_vs \
                = sess.run([train_all['train_op'], train_all['elbo'], train_all['hidden_us'], train_all['hidden_vs']],
                           feed_dict={model.inputs_u: (sp_u_indices, sp_u_val, sp_u_shape),
                                      model.inputs_v: (sp_v_indices, sp_v_val, sp_v_shape),
                                      model.inputs_u_res_1: (sp_u_indices_res, sp_u_val_res, sp_u_shape_res),
                                      model.inputs_v_res_1: (sp_v_indices_res, sp_v_val_res, sp_v_shape_res),
                                      model.re_sp_u_ids: re_sp_u_ids,
                                      model.u_seg_ids: u_seg_ids,
                                      model.re_sp_v_ids: re_sp_v_ids,
                                      model.v_seg_ids: v_seg_ids,
                                      model.h_U: hidden_U[inputs_u_idx, :],
                                      model.h_V: hidden_V[inputs_v_idx, :],
                                      model.inputs_u_idx: inputs_u_idx,
                                      model.inputs_v_idx: inputs_v_idx,
                                      model.inputs_idx_pair: inputs_idx_pair[:, 0:4],
                                      model.ratings: inputs_idx_pair[:, 4]})
            pbar.set_description('Testing Batch: %d, loss = %g' % (g, loss))
        pbar.close()
        sys.stdout.flush()

        for i in range(inputs_u_idx.shape[0]):
            tmp_idx = int(max(inputs_idx_pair[inputs_idx_pair[:, 0] == i, 2]))
            hidden_U[inputs_u_idx[i], :] = hidden_us[i, tmp_idx, :]
        for j in range(inputs_v_idx.shape[0]):
            tmp_idx = int(max(inputs_idx_pair[inputs_idx_pair[:, 1] == j, 3]))
            hidden_V[inputs_v_idx[j], :] = hidden_vs[j, tmp_idx, :]
Code Example #4
def get_PLANKTON10(load_data_args):
    data_dir = "../" + load_data_args['data_dir'] + "/"
    print(data_dir)
    header_file = data_dir + 'header.tfl.txt'
    filename = data_dir + 'image_set.data'
    file_ending = load_data_args['file_ending']
    num_classes = load_data_args['num_classes']
    img_dim = load_data_args['img_dim']
    X_tr, X_te = [], []
    Y_tr, Y_te = [], []

    if not (os.path.exists(f"{data_dir}data.npy")
            and os.path.exists(f"{data_dir}labels.npy")):

        try:
            dataset = DataSet(data_dir=data_dir,
                              header_file=header_file,
                              csv_file=filename,
                              file_ending=file_ending,
                              transform=None,
                              num_classes=num_classes,
                              train=True,
                              img_dim=img_dim)

            _images = dataset.images
            _labels = dataset.labels

        except Exception as e:
            print(f"Could not load dataset, exception: {e}")
            sys.exit('No dataset')

    else:
        _images = np.load(f"{data_dir}data.npy", allow_pickle=True)
        _labels = np.load(f"{data_dir}labels.npy", allow_pickle=True)

    print(f"Shape of images: {_images.shape}")

    shuffled_indices = np.random.permutation([_ for _ in range(len(_images))])
    split = int(len(_images) / 5)

    X_valid, Y_valid = _images[shuffled_indices[:int(split // 5)]], _labels[
        shuffled_indices[:int(split // 5)]]
    X_te, Y_te = _images[shuffled_indices[int(split // 5):split]], _labels[
        shuffled_indices[int(split // 5):split]]
    X_tr, Y_tr = _images[shuffled_indices[split:]], _labels[
        shuffled_indices[split:]]

    Y_tr, Y_te, Y_valid = torch.from_numpy(Y_tr), torch.from_numpy(
        Y_te), torch.from_numpy(Y_valid)

    return X_tr, Y_tr, X_te, Y_te, X_valid, Y_valid
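
A usage sketch for get_PLANKTON10 (assumed): the keys match the ones read inside the function, and the values shown are placeholders.

# Hypothetical load_data_args; values are placeholders.
load_data_args = {
    'data_dir': 'plankton10',   # resolved to ../plankton10/ inside the function
    'file_ending': '.jpg',
    'num_classes': 10,
    'img_dim': 64,
}
X_tr, Y_tr, X_te, Y_te, X_valid, Y_valid = get_PLANKTON10(load_data_args)
print(X_tr.shape, Y_tr.shape, X_valid.shape)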
Code Example #5
    classification = class_unseen(phi, scores)
    #print("Current TestLoss: {}".format(loss))
    return classification


if __name__ == '__main__':
    tf.compat.v1.enable_eager_execution()
    gpu = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu))
    if len(gpu) > 0:
        # enable memory growth on every detected GPU
        for g in gpu:
            tf.config.experimental.set_memory_growth(g, True)

    # read dataset
    path_root = os.path.abspath(os.path.dirname(__file__))
    bird_data = DataSet("/Volumes/Watermelon")  # DataSet(path_root)
    phi_train = bird_data.get_phi(set=0)
    w = bird_data.get_w(alpha=1)  # (50*150)
    train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    train_ds, test_ds = bird_data.load_gpu(batch_size=BATCH_SIZE)

    #path_root = os.path.abspath(os.path.dirname(__file__))
    #database = DataSet("/Volumes/Watermelon")  # path_root)
    #PHI = database.get_phi()
    #DS, DS_test = database.load_gpu(batch_size=5)  # image_batch, label_batch
    modelaki = FinalModel()

    # define loss and opt functions
    loss_fun = Loss().final_loss
    step = tf.Variable(0, trainable=False)
    boundaries = [187 * 5, 187 * 10]
Code Example #6
from dataloader import DataSet
from Losses import Loss
import sys

sys.path.append("../src")
from jointmodel import JFL

CHANNELS = 512
N_CLASSES = 200
SEMANTIC_SIZE = 28
BATCH_SIZE = 5
IMG_SIZE = 448
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)

# read dataset
database = DataSet("/Volumes/Watermelon")
PHI = database.get_phi()
DS, DS_test = database.load_gpu(batch_size=BATCH_SIZE)

tf.compat.v1.enable_eager_execution()
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

BUFFER_SIZE = 5
BATCH_SIZE_PER_REPLICA = 32
GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 30

train_dataset = DS
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
Code Example #7
# Main module to train the model, load the data,
# do gradient descent etc. followed by saving our model for later testing
from dataloader import DataSet, create_samplers
from model import Model
from visualizer import Visualizer
from options import TrainOptions
import torch
from torchvision.transforms import *
import torch.optim as optim
import numpy as np
import os
# Get the Hyperparameters
opt = TrainOptions().parse()

sample_dataset = DataSet(opt, "/media/shubh/PranayHDD/Kinect/")
train_sampler, val_sampler = create_samplers(sample_dataset.__len__(),
                                             opt.split_ratio)
data_loader = torch.utils.data.DataLoader(sample_dataset,
                                          sampler=train_sampler,
                                          batch_size=opt.batch_size,
                                          num_workers=opt.num_workers)
data_val_loader = torch.utils.data.DataLoader(sample_dataset,
                                              sampler=val_sampler,
                                              batch_size=opt.val_batch_size,
                                              num_workers=0,
                                              shuffle=False)

# Check if gpu available or not
device = torch.device("cuda" if (
    torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device
Code Example #8
# Test  the model, load the data,
# followed by saving our model for later testing
from dataloader import DataSet
from model import Model
from options import TestOptions
import torch
from torchvision.transforms import *
import numpy as np 
import os
import cv2
import matplotlib.pyplot as plt
import pandas as pd
# Get the Hyperparameters
opt = TestOptions().parse()

target_dataset = DataSet(opt,'/media/shubh/PranayHDD/Kinect/')
target_loader = torch.utils.data.DataLoader(target_dataset,batch_size=opt.batch_size,num_workers=opt.num_workers,shuffle=False)

device = torch.device("cuda" if (torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device
model = Model(opt)
if opt.use_gpu:
	model = model.cuda()	
	model = torch.nn.DataParallel(model, device_ids=opt.gpus)

# Load the weights and make predictions
model.load_state_dict(torch.load('./checkpoints/' + 'model_{}.pt'.format(opt.load_epoch)))

# Print our model 
print('------------ Model -------------')
print(model)
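
A hypothetical continuation of this test script (not from the original file): it sketches collecting predictions from target_loader, assuming each batch's first element is the image tensor and that calling the model returns its predictions.

# Assumed evaluation loop; the exact batch layout of this project's DataSet is not shown above.
model.eval()
predictions = []
with torch.no_grad():
    for batch in target_loader:
        images = batch[0].to(device)
        outputs = model(images)
        predictions.append(outputs.cpu())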
Code Example #9
File: train.py Project: umangS77/Assignment
import os
import numpy as np 
import torch
from torchvision.transforms import *
import torch.optim as optim

from options import TrainOptions
from model import Model
from dataloader import DataSet


# Get the Hyperparameters
opt = TrainOptions().parse()


sample_dataset = DataSet(opt,"../dataset/IMFDB")

sample_loader = torch.utils.data.DataLoader(sample_dataset,batch_size=opt.batch_size,num_workers=5,shuffle=False)


# Check if gpu available or not
device = torch.device("cuda" if (torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device

# Load the model and send it to gpu
model = Model(opt,15)
model = model.to(device)
if opt.use_gpu:	
	model = torch.nn.DataParallel(model, device_ids=opt.gpus)

# Print our model 
Code Example #10
# followed by saving our model for later testing
from dataloader import DataSet
from model import Model
from options import TestOptions
import torch
from torchvision.transforms import *
import numpy as np 
import os
from sklearn.metrics import confusion_matrix,roc_curve
import cv2
torch.multiprocessing.set_sharing_strategy('file_system')
import matplotlib.pyplot as plt
# Get the Hyperparameters
opt = TestOptions().parse()

target_dataset = DataSet(opt,"./dataset/FindIt-Dataset-Test/T1-test/img/", "./dataset/FindIt-Dataset-Test/T1-Test-GT.xml","./dataset/FindIt-Dataset-Test/T2-test/img/" )
target_loader = torch.utils.data.DataLoader(target_dataset,batch_size=opt.val_batch_size,num_workers=opt.workers//5,shuffle=False)

# Load the model and send it to gpu
test_transforms =  transforms.Compose([ transforms.Normalize(mean = [ 0., 0., 0.,0.,0.,0. ],
						 std = [ 1/0.229, 1/0.224, 1/0.225,1.,1.,1. ]),
	transforms.Normalize(mean = [ -0.485, -0.456, -0.406,0.,0.,0. ],
						 std = [ 1., 1., 1.,1.,1.,1. ]) ])


device = torch.device("cuda" if (torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device
model = Model(opt)
if opt.use_gpu:

	model = model.cuda()	
Code Example #11
from model import ModelBuilder

if __name__ == "__main__":

    df = Reader.read_csv("data",
                         "sentiment_train.csv",
                         config="default",
                         streamType="csv",
                         columns="0,1",
                         filter="full",
                         count=5,
                         header="1",
                         transformers=None)
    tokenizer = Utils.get_text_tokenizer(df, 1)
    label_encoder = Utils.get_label_encoder(df, 0)
    encoded_labels = DataSet.get_encodered_labels(df, 0, label_encoder)
    inputdata = DataSet.text_to_matrix(df, 1, tokenizer)
    modeldef = ModelBuilder.create_model(
        'Keras Sequential Model',
        shape='2,2',
        config=
        ('{"loss_function":"categorical_crossentropy","optimizer":"adam"}', [
            '{ "layer_type":"Dense" ,"activation":"relu","optimizer":"Adam","threshold":"512","input_shape":"10000,"}',
            '{ "layer_type":"Dropout" ,"activation":"relu","optimizer":"Adam","threshold":".5","input_shape":""}',
            '{ "layer_type":"Dense" ,"activation":"softmax","optimizer":"Adam","threshold":"4","input_shape":""}'
        ]))
    test_df = Reader.read_csv("data",
                              "sentiment_test.csv",
                              config="default",
                              streamType="df",
                              columns="0,1",
Code Example #12
# do gradient descent etc. followed by saving our model for later testing
from dataloader import DataSet, create_samplers
from model import Model
from options import TrainOptions
import torch
from torchvision.transforms import *
import torch.optim as optim
import numpy as np
from visualizer import Visualizer
import os
# Get the Hyperparameters
opt = TrainOptions().parse()

import matplotlib.pyplot as plt

sample_dataset = DataSet(opt, "./Find_it_eval_patches/")

train_sampler, val_sampler = create_samplers(sample_dataset.__len__(),
                                             opt.split_ratio)
sample_loader = torch.utils.data.DataLoader(sample_dataset,
                                            sampler=train_sampler,
                                            batch_size=opt.batch_size,
                                            num_workers=15)
sample_val_loader = torch.utils.data.DataLoader(sample_dataset,
                                                sampler=val_sampler,
                                                batch_size=opt.val_batch_size,
                                                num_workers=5,
                                                shuffle=False)

target_dataset = DataSet(opt, "./Find_it_eval_patches/")
Code Example #13
File: BaseModel.py Project: LindsayXX/DD2412_project
    # scores:  batch_size*200*1
    # labels: batch_size * 1
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=score))



if __name__ == '__main__':
    #os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    #import tensorflow as tf
    tf.compat.v1.enable_eager_execution()
    gpu = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu))
    if gpu:
        tf.config.experimental.set_memory_growth(gpu[0], True)
    path_root = os.path.abspath(os.path.dirname(__file__))  # '/content/gdrive/My Drive/data'
    bird_data = DataSet(path_root)
    # load all imgs
    phi = bird_data.get_phi()
    train_ds, test_ds = bird_data.load_gpu(batch_size=BATCH_SIZE)
    # only take 1000 images for local test
    #train_ds = bird_data.load(GPU=False, train=True, batch_size=32)
    #test_ds = bird_data.load(GPU=False, train=False, batch_size=32)

    #image_batch, label_batch = next(iter(train_ds))

    model = BaseModel(150, 312)
    #opt = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)  # or SGDW with weight decay
    opt = tfa.optimizers.SGDW(
        learning_rate=0.0001, weight_decay=5 * 1e-4, momentum=0.9)

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=model)
Code Example #14
File: train.py Project: chrimerss/Nowcast
def main():
    global use_gpu, EVENTS
    # set up some parameters
    batch_size = 2
    lr = 1e-3
    logging_path = 'logging/'
    num_epoches = 100
    epoch_to_save = 10

    # print("# of training samples: %d\n" %int(len(dataset_train)))

    # model= Nowcast(hidden_channels=16,use_gpu=use_gpu)
    model = UNet()
    # model=RadarNet(hidden_layers=16,use_gpu=True, device=0)
    print(model)
    num_params(model)
    model.apply(init_weights)
    # criterion= ComLoss()
    criterion = torch.nn.MSELoss()

    # model.load_state_dict(torch.load('../logging/newest-5_8.pth'))

    if use_gpu:
        model = model.cuda()
        criterion.cuda()

    #optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = MultiStepLR(optimizer, milestones=[40, 80], gamma=0.2)

    #record
    writer = SummaryWriter(logging_path)

    #start training
    model.train()
    step = 0
    for epoch in range(num_epoches):
        start = time.time()

        for param_group in optimizer.param_groups:
            print('learning rate %f' % param_group['lr'])

        for e, event in enumerate(EVENTS[:2]):
            # ====================normal===============#
            dataset_train = DataSet(event=event)
            loader_train = DataLoader(dataset=dataset_train,
                                      num_workers=8,
                                      batch_size=batch_size,
                                      shuffle=True)
            # ====================DALI===============#
            # loader_train = get_iter_dali(event=EVENTS[0], batch_size=2,
            # num_threads=8)

            for i, data in enumerate(loader_train):
                # input size: (4,10,1,200,200)
                # target size: (4,10,1,200,200)
                # ====================normal===============#
                # input_train=Variable(torch.rand(size=(1,10,1,200,200)))
                # target_train=Variable(torch.ones(size=(1,10,1,200,200)))

                input_train = data[0].squeeze(axis=2)
                target_train = data[1][:, :, :, :, :].squeeze(axis=2)
                # ====================DALI===============#
                # data= data[0]
                # input_train=data['inputs']
                # target_train=data['target']
                optimizer.zero_grad()
                # if model.radarnet.predictor.Who.weight.grad is not None:
                # print('before backward gradient: ', model.radarnet.predictor.Who.weight.grad.max())

                # model.zero_grad()

                # print(input_train.size())
                input_train = normalizer(input_train)
                # target_train= normalizer(target_train)
                input_train, target_train = Variable(input_train), Variable(
                    target_train)
                if use_gpu:
                    input_train, target_train = input_train.cuda(
                    ), target_train.cuda()

                out_train = model(input_train)
                loss = criterion(target_train, out_train)

                loss.backward()
                # if model.radarnet.predictor.Who.weight.grad is not None:
                # print('after backward gradient: ', model.radarnet.predictor.Who.weight.grad.max())
                # print('gradient: ', model.predictor.U_z.weight.grad.max())
                # print('gradient: ', model.predictor.W_r.weight.grad.max())
                # print('gradient: ', model.predictor.U_r.weight.grad.max())
                # print('gradient: ', model.predictor.W_c.weight.grad.max())
                # print('gradient: ', model.predictor.U_c.weight.grad.max())

                optimizer.step()

                # output_train= torch.clamp(out_train, 0, 1)
                # ================NORMAL================ #
                print("[epoch %d/%d][event %d/%d][step %d/%d]  obj: %.4f " %
                      (epoch + 1, num_epoches, e, len(EVENTS), i + 1,
                       len(loader_train), loss.item()))
                # print("[epoch %d/%d][step %d/%d]  obj: %.4f "%(epoch+1,num_epoches,  i+1,len(loader_train),loss.item()))
                # ================DALI================ #
                # print("[epoch %d/%d][event %d/%d][step %d]  obj: %.4f "%(epoch+1,num_epoches,e, len(EVENTS), i+1,-loss.item()))

                # print(list(model.parameters()))
                if step % 10 == 0:
                    writer.add_scalar('loss', loss.item(), step)  # log the MSE loss at the current global step

                step += 1

    #save model
        if epoch % epoch_to_save == 0:
            torch.save(
                model.state_dict(),
                os.path.join(logging_path, 'net_epoch%d.pth' % (epoch + 1)))
        end = time.time()
        print('One epoch costs %.2f minutes!' % ((end - start) / 60.))

        scheduler.step(epoch)

    torch.save(model.state_dict(), os.path.join(logging_path, 'newest.pth'))
Code Example #15
# test the model
# @tf.function
def test_step(model, images, loss_fun):
    m_i, m_k, map_att, gtmap, score, y_true, y_pred, n_classes, batch_size = model(images)
    loss = loss_fun(m_i, m_k, map_att, gtmap, score, y_true, y_pred, n_classes, batch_size)
    print("Current TestLoss: {}".format(loss))
'''

# testing by running

if __name__ == '__main__':
    # tf.compat.v1.enable_eager_execution()
    gpu = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu))
    # path_root = os.path.abspath(os.path.dirname(__file__))
    database = DataSet('../basemodel') #"/content/gdrive/My Drive/data")  # path_root)

    # DS, DS_test = database.load_gpu()  # image_batch, label_batch
    DS = database.load(GPU=False, train=True, batch_size=4)
    # DS_test = database.load(GPU=False, train=False, batch_size = 32)

    modelaki = FinalModel()

    loss_fun = Loss().loss_MA
    opt_fun = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)

    # ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt_fun, net=modelaki)
    # manager = tf.train.CheckpointManager(ckpt, path_root + '/tf_ckpts',
    #                                     max_to_keep=3)  # keep only the three most recent checkpoints
    # ckpt.restore(manager.latest_checkpoint)  # pickup training from where you left off
Code Example #16
# do gradient descent etc. followed by saving our model for later testing
from dataloader import DataSet,create_samplers
from model import Model
from options import TrainOptions
import torch
from torchvision.transforms import *
import torch.optim as optim
import numpy as np 
from visualizer import Visualizer
import os
# Get the Hyperparameters
opt = TrainOptions().parse()

import matplotlib.pyplot as plt

sample_dataset = DataSet(opt,"./dataset/T1-train/img/", "./dataset/T1-train/GT/T1-GT.xml","./dataset/T2-Train/img/" )

train_sampler,val_sampler = create_samplers(sample_dataset.__len__(),opt.split_ratio)
sample_loader = torch.utils.data.DataLoader(sample_dataset,sampler=train_sampler,batch_size=opt.batch_size,num_workers=opt.workers)
sample_val_loader = torch.utils.data.DataLoader(sample_dataset,sampler=val_sampler,batch_size=opt.val_batch_size,num_workers=opt.workers//5,shuffle=False)

# Check if gpu available or not
device = torch.device("cuda" if (torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device

# Load the model and send it to gpu
model = Model(opt)
model = model.to(device)
if opt.use_gpu:	
	model = torch.nn.DataParallel(model, device_ids=opt.gpus)
Code Example #17
        # sum (and normalize?) the scores and mapped features
        score = tf.add(
            tf.add(global_theta, local_scores0),
            local_scores1)  #tf.math.reduce_sum(, axis=1, keepdims=True)
        # avg_score = tf.multiply(sum_gll, 1.0 / 3.0)
        # compute ||~phi_i - ~Ci|| and ||~phi_i - ~Cj||, '~' is normalization
        # l2loss = self.l2loss(
        #    [tf.math.l2_normalize(tf.squeeze(out)), tf.math.l2_normalize(tf.transpose(center, perm=[0, 2, 1]))])
        phi_mapped = tf.add(tf.add(global_phi, local0_phi), local1_phi)
        #avg_phi = tf.multiply(sum_gll, 1.0 / 3.0)

        y_pred = self.classifier(score)

        return m0, m1, mask0, mask1, score, phi_mapped, y_pred, self.C


if __name__ == '__main__':
    # just for testing
    path_root = os.path.abspath(
        os.path.dirname(__file__))  # '/content/gdrive/My Drive/data'
    bird_data = DataSet("D:/MY2/ADDL/DD2412_project/basemodel")
    PHI = bird_data.get_phi(set=0)
    #w = bird_data.get_w(alpha=1)  # (50*150)
    #train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    # only take 1000 images for local test
    train_ds = bird_data.load(GPU=False, train=True, batch_size=4)
    # test_ds = bird_data.load(GPU=False, train=False, batch_size=32)
    image_batch, label_batch = next(iter(train_ds))
    test_model = FinalModel()
    m0, m1, mask0, mask1, scores, phi, y_pred, C = test_model(image_batch, PHI)
Code Example #18
if __name__ == "__main__":

    df = Reader.read_csv("data",
                         "NSE_Abbott India Limited.csv",
                         config="default",
                         streamType="csv",
                         columns="",
                         filter="full",
                         count=5,
                         header="1",
                         transformers=None)
    shaper = DataSet.shape_data_frame(df,
                                      '',
                                      x_columns='1:9',
                                      y_columns='3',
                                      x_dimention='3',
                                      y_dimention='1',
                                      y_offset=1,
                                      test_data_size=20)
    normalizer = Utils.get_preprocessing_scaler(min_max_tuple=(-1, 1))
    shaper = Utils.fit_transform(shaper, normalizer)
    model_def = ModelBuilder.create_model(
        'Keras Sequential Model',
        shape='2,2',
        config=('{"loss_function":"mean_absolute_error","optimizer":"adam"}', [
            '{ "layer_type":"LSTM" ,"activation":"tanh","optimizer":"Adam","threshold":"100","input_shape":"1,8"}',
            '{ "layer_type":"Dropout" ,"activation":"sigmoid","optimizer":"sgd","threshold":"0.2","input_shape":""}',
            '{ "layer_type":"Dense" ,"activation":"linear","optimizer":"Adam","threshold":"1","input_shape":""}'
        ]))
    model = ModelBuilder.train_model(model_def, shaper, 'true')
    result = ModelBuilder.predict_model(model, shaper)
Code Example #19
File: demo_ml_10M.py Project: qingquansong/CVRCF
def main(args):
    # Select running device
    if args.gpu is None:
        select_gpu()
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Initialize the latent factors
    hidden_U = np.zeros([args.num_of_u, args.hidden_state_siz_u])
    hidden_V = np.zeros([args.num_of_v, args.hidden_state_siz_v])

    model = CVRCF(args)

    # Define elbo loss and optimizer
    train_all = model.train()

    pred_all = model.predict_ratings()

    optimizer = tf.train.AdamOptimizer(learning_rate=args.lr)  # AdamOptimizer
    train_all['train_op'] = optimizer.minimize(-train_all['elbo'])

    # Start training
    config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    # config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config)
    init = tf.global_variables_initializer()

    # Run the initializer
    sess.run(init)

    # Model training
    for epoch in range(1, args.n_epoch + 1):
        dd = 0
        mark_u_time_end = np.zeros(args.num_of_u)
        mark_v_time_end = np.zeros(args.num_of_v)

        train_data = DataSet('data/train.txt', args, mark_u_time_end, mark_v_time_end)

        start_time = time.time()

        while train_data.finish != 1:

            # Get Data Batch
            data, batch_all = get_batch_data(train_data)

            # Test
            MSE, RMSE, MSE1, RMSE1 = test_one_step(batch_all, model, sess, pred_all, hidden_U, hidden_V)

            # Run optimization op (backprop)
            sp_u_indices, sp_u_shape, sp_u_val, \
            sp_u_indices_res, sp_u_shape_res, sp_u_val_res, \
            sp_v_indices, sp_v_shape, sp_v_val, \
            sp_v_indices_res, sp_v_shape_res, sp_v_val_res, \
            inputs_u_idx, inputs_v_idx, \
            inputs_idx_pair, all_data, \
            siz, mark_new_user_movie, \
            re_sp_u_ids, u_seg_ids, \
            re_sp_v_ids, v_seg_ids = batch_all

            dd += 1
            loss0, loss = 0, 100
            t = 0
            pbar = tqdm(total=args.max_t)
            while abs(loss - loss0) / abs(loss) > 1e-2 and t < args.max_t:
                t += 1
                pbar.update(1)
                _, loss, hidden_us, hidden_vs = sess.run([train_all['train_op'], train_all['elbo'],
                                                          train_all['hidden_us'], train_all['hidden_vs']],
                                                         feed_dict={
                                                             model.inputs_u: (sp_u_indices, sp_u_val, sp_u_shape),
                                                             model.inputs_v: (sp_v_indices, sp_v_val, sp_v_shape),
                                                             model.inputs_u_res_1:
                                                                 (sp_u_indices_res, sp_u_val_res, sp_u_shape_res),
                                                             model.inputs_v_res_1:
                                                                 (sp_v_indices_res, sp_v_val_res, sp_v_shape_res),
                                                             model.re_sp_u_ids: re_sp_u_ids,
                                                             model.u_seg_ids: u_seg_ids,
                                                             model.re_sp_v_ids: re_sp_v_ids,
                                                             model.v_seg_ids: v_seg_ids,
                                                             model.h_U: hidden_U[inputs_u_idx, :],
                                                             model.h_V: hidden_V[inputs_v_idx, :],
                                                             model.inputs_u_idx: inputs_u_idx,
                                                             model.inputs_v_idx: inputs_v_idx,
                                                             model.inputs_idx_pair: inputs_idx_pair[:, 0:4],
                                                             model.ratings: inputs_idx_pair[:, 4]})
                pbar.set_description(
                    'Training Epoch: %d, Batch: %d, loss = %.3g, RMSE = %.3g' % (epoch, dd, loss, RMSE))
            pbar.close()
            sys.stdout.flush()

            for i in range(inputs_u_idx.shape[0]):
                tmp_idx = int(max(inputs_idx_pair[inputs_idx_pair[:, 0] == i, 2]))
                hidden_U[inputs_u_idx[i], :] = hidden_us[i, tmp_idx, :]
            for j in range(inputs_v_idx.shape[0]):
                tmp_idx = int(max(inputs_idx_pair[inputs_idx_pair[:, 1] == j, 3]))
                hidden_V[inputs_v_idx[j], :] = hidden_vs[j, tmp_idx, :]

        if not (train_data.finish == 0 or epoch == args.n_epoch):
            # Reinitialize to default value after finishing one epoch
            print("\n One Epoch Finished! \n")
            hidden_U = np.zeros([args.num_of_u, args.hidden_state_siz_u])
            hidden_V = np.zeros([args.num_of_v, args.hidden_state_siz_v])

        print("--- %s seconds ---" % (time.time() - start_time))

        train_data.finish = 0
    print("Optimization Finished!")

    # Testing Phase
    testing_phase(model, sess, train_all, pred_all, mark_u_time_end, mark_v_time_end, hidden_U, hidden_V)

    # Final Performance
    N = np.load("results/N_ml_10M.npy")
    N1 = np.load("results/N1_ml_10M.npy")
    RMSE = np.load("results/RMSE_ml_10M.npy")
    RMSE1 = np.load("results/RMSE1_ml_10M.npy")
    avg_RMSE = np.sqrt(np.sum(RMSE ** 2 * N) / np.sum(N))
    avg_RMSE1 = np.sqrt(np.sum(RMSE1 ** 2 * N1) / np.sum(N1))
    print('Average RMSE w/o new users & items: ', avg_RMSE)
    print('Average RMSE with new users & items: ', avg_RMSE1)
Code Example #20
File: main.py Project: arunsoman/deeplearning4all
tokenizer = None
label_encoder = None
encoded_labels = None
inputdata = None
test_df = None
text_X = None
test_Y = None
shaper = None
model = None


from reader import Reader
from dataloader import Utils
from dataloader import DataSet
from model import ModelBuilder
from writer import Writer

if __name__ == "__main__" :

  df = Reader.read_csv("data","sentiment_train.csv",config="default",streamType="csv",columns="0,1",filter="full",count=5)
  tokenizer =  Utils.get_text_tokenizer(df,1)
  label_encoder = Utils.get_label_encoder(df,0)
  encoded_labels = DataSet.get_encodered_labels(df,0,label_encoder)
  inputdata = DataSet.text_to_matrix(df,1,tokenizer)
  test_df = Reader.read_csv("data","sentiment_test.csv",config="default",streamType="df",columns="0,1",filter="full",count=5)
  text_X = DataSet.text_to_matrix(test_df,1,tokenizer)
  test_Y = DataSet.get_encodered_labels(test_df,0,label_encoder)
  shaper = tuple([inputdata, encoded_labels, text_X, test_Y])
  model = ModelBuilder.train_model((ModelBuilder.create_model('KNN Classifier',shape='2,2',config='{"n_neighbors":1,"algorithm":"ball_tree","weights":"distance"}')),shaper,'true')
  Writer.write_csv((DataSet.get_label((ModelBuilder.predict_model(model,text_X)),0,label_encoder)),"default")
Code Example #21
                              weight_decay=5 * 1e-4,
                              momentum=0.9)

    # MODEL
    #net = FinalModel()
    #new_root = tf.train.Checkpoint(net=net)
    #status = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))
    net = FinalModel()
    ckpt = tf.train.Checkpoint(step=tf.Variable(1, dtype=tf.int32),
                               optimizer=opt,
                               net=net)
    ckpt.restore(tf.train.latest_checkpoint('./tf_ckpts/'))

    #DATA
    path_root = os.path.abspath(os.path.dirname(__file__))
    bird_data = DataSet("/Volumes/Watermelon")  # DataSet(path_root)
    phi_train = bird_data.get_phi(set=0)
    w = bird_data.get_w(alpha=1)  # (50*150)
    train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    train_ds = bird_data.load(GPU=False, train=True, batch_size=32)
    #test_ds = bird_data.load(GPU=False, train=False, batch_size=4) #.load_gpu(batch_size=4)
    PHI = bird_data.get_phi(set=0)
    for im, label in train_ds:
        #im_path = "/Volumes/Watermelon/CUB_200_2011/CUB_200_2011/images/059.California_Gull/"
        #img = tf.io.read_file(im_path)
        #im = database.decode_img(img)
        m0, m1, mask0, mask1, scores, phi, y_pred, C = net(
            im, PHI)  #tf.expand_dims(im,0)

    nu = 50
    ns = 150
Code Example #22
from dataloader import DataSet
from model import Model
from options import TestOptions
import torch
from torchvision.transforms import *
import numpy as np
import os
from sklearn.metrics import confusion_matrix, roc_curve
import cv2
torch.multiprocessing.set_sharing_strategy('file_system')
import matplotlib.pyplot as plt
# Get the Hyperparameters
opt = TestOptions().parse()

test_dir = "/media/shubh/Windows/home/shubh/findit/Find_it_pristine_patches/"
target_dataset = DataSet(opt, test_dir)
target_loader = torch.utils.data.DataLoader(target_dataset,
                                            batch_size=opt.val_batch_size,
                                            num_workers=30,
                                            shuffle=False)

# Load the model and send it to gpu
test_transforms = transforms.Compose([
    transforms.Normalize(mean=[0., 0., 0.],
                         std=[1 / 0.229, 1 / 0.224, 1 / 0.225]),
    transforms.Normalize(mean=[-0.485, -0.456, -0.406], std=[1., 1., 1.])
])

device = torch.device("cuda" if (
    torch.cuda.is_available() and opt.use_gpu) else "cpu")
opt.device = device
Code Example #23
import time
import os

import numpy as np

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, upsample_2d

import tensorflow as tf

from dataloader import DataSet
data = DataSet()

from helpers import (dice_score)

from config import (batch_size, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS,
                    learning_rate)

# Network Parameters
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS])
y = tf.placeholder(tf.float32, [None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS])

################Create Model######################
conv1 = conv_2d(x, 32, 3, activation='relu', padding='same', regularizer="L2")
conv1 = conv_2d(conv1,
                32,
                3,
                activation='relu',
                padding='same',