def __init__(self, model_name, config="auto"):
        """
        Manages the end-to-end training procedure for the model, through to evaluation.
        :param model_name: string name of the model
        :param config: config object containing model and training hyper-parameters. Non-default parameters
        can be supplied as a dictionary, e.g. config={"model_size": 256}
        :raises ModelNotFoundError: if model_name does not match the list of available models

        To run the training process:

            trainer = Trainer("baseline")
            trainer.train()
            # Some training happens
            # ...
            results = trainer.evaluate(["bleu", "rouge"])
            # {"bleu": 0.67, "rouge-l": 0.5}
        """
        if model_name not in ModelNotFoundError.model_names:
            raise ModelNotFoundError()

        if not os.path.isdir("Save"):
            os.mkdir("Save")

        self.config = Config(model_name)
        self.tokenizer = self.config.tokenizer
        if config != "auto":
            assert type(config) is dict
            for key, value in config.items():
                self.config.__setattr__(key, value)
        self.dataloader = Dataloader(self.config,
                                     multitask=self.config.multitask)
        self.transformer = Transformer(self.config)

        opt = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)
        self.transformer.compile(optimizer=opt)
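# Usage sketch for the Trainer above, following its docstring; the "baseline"
# model name comes from the docstring example, and the override keys are
# illustrative, not verified against the model registry.
trainer = Trainer("baseline", config={"model_size": 256, "learning_rate": 1e-4})
trainer.train()
results = trainer.evaluate(["bleu", "rouge"])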
Example 2
    def __init__(self, nrun=-1):
        self.args = Namespace(
            cuda=True,
            ndf=8,
            nef=8,
            wkld=0.01,
            gbweight=100,
            nlatent=9,
            nechannels=6,
            ngchannels=1,
            resume="",
            save="mini-save",
            loader_train="h5py",
            loader_test="h5py",
            dataset_test=None,
            dataset_train="filelist",
            split_test=0.0,
            split_train=1.0,
            filename_test="./data/data.txt",
            filename_train="./data/data.txt",
            batch_size=64,
            resolution_high=512,
            resolution_wide=512,
            nthreads=32,
            images="mini-save/images",
            pre_name="save",
        )
        # pick the nrun-th run directory (default: the latest) under results/
        latest_save = sorted(Path("results").iterdir())[nrun]
        self.rundate = latest_save.name
        # resume the generator from the run's Save directory
        self.args.resume = {"netG": latest_save.joinpath("Save")}
        checkpoints = Checkpoints(self.args)

        # Create model
        models = Model(self.args)
        self.model, self.criterion = models.setup(checkpoints)

        # Data loading
        self.dataloader = Dataloader(self.args)
        self.loader = self.dataloader.create(flag="Test")
        print("\t\tBatches:\t", len(self.loader))

        self.resolution_high = self.args.resolution_high
        self.resolution_wide = self.args.resolution_wide
        self.batch_size = self.args.batch_size
        self.ngchannels = self.args.ngchannels
        self.nechannels = self.args.nechannels
        self.nlatent = self.args.nlatent
        self.composition = torch.FloatTensor(self.batch_size, self.ngchannels, self.resolution_high, self.resolution_wide)
        self.metadata = torch.FloatTensor(self.batch_size, self.nechannels, self.resolution_high, self.resolution_wide)

        if self.args.cuda:
            self.composition = self.composition.cuda()
            self.metadata = self.metadata.cuda()

        self.composition = Variable(self.composition)
        self.metadata = Variable(self.metadata)

        self.imgio = plugins.ImageIO(self.args.images, self.args.pre_name)
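# Side note: torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4.
# A minimal modern equivalent of the buffer setup above, as a sketch only
# (make_buffers is a hypothetical helper, not part of the original class):
import torch

def make_buffers(batch_size, ngchannels, nechannels, high, wide, use_cuda):
    # allocate the composition/metadata buffers directly on the target device
    device = torch.device("cuda" if use_cuda else "cpu")
    composition = torch.empty(batch_size, ngchannels, high, wide, device=device)
    metadata = torch.empty(batch_size, nechannels, high, wide, device=device)
    return composition, metadata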
Example 3
def main():
    # parse the arguments
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion = models.setup(checkpoints)

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion)

    # start training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):

        # train for a single epoch
        loss_train = trainer.train(epoch, loaders)
        loss_test = tester.test(epoch, loaders)

        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            checkpoints.save(epoch, model, model_best)
Example 4
    def main(self):
        if args['model_arch'] == 'li2016':
            args['batchSize'] = 16
        elif args['model_arch'] in ('bert2bert', 'bart'):
            args['batchSize'] = 8

        self.textData = Dataloader('fb')
        self.start_token = self.textData.word2index['START_TOKEN']
        self.end_token = self.textData.word2index['END_TOKEN']
        args['vocabularySize'] = self.textData.getVocabularySize()
        args['emo_labelSize'] = len(self.textData.index2emotion)
        print(self.textData.getVocabularySize())

        print('Using', args['model_arch'], 'model.')
        if args['model_arch'] == 'li2016':
            self.model = Model(self.textData.word2index, self.textData.index2word)
        elif args['model_arch'] == 'bert2bert':
            self.model = BERTEncDecModel()
            self.model.train()
        elif args['model_arch'] == 'bart':
            self.model = BARTModel()
            self.model.train()

        self.model = self.model.to(args['device'])
        self.train()
Example 5
    def __init__(self, data_path='Bike-Sharing-Dataset/hour.csv'):
        '''Initialize the random forest model.
        
        Keyword Arguments:
            data_path {str} -- Path to the Bike Sharing Dataset. (default: {'Bike-Sharing-Dataset/hour.csv'})            
        '''

        # Make results reproducible
        random.seed(100)
        
        # Load data from the bike sharing CSV
        self.data = {}
        dataloader = Dataloader(data_path)
        self.data['full'] = dataloader.getFullData()

        # Define feature and target variables
        self.features = ['season', 'mnth', 'hr', 'holiday', 'weekday', 'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed']
        self.target = ['cnt']

        # Convert pandas frame into samples and labels
        self.samples, self.labels = {}, {}
        self.samples['full'] = self.data['full'][self.features].values
        self.labels['full'] = self.data['full'][self.target].values.ravel()    

        # Define model
        # (note: in scikit-learn >= 1.0 the 'mse' criterion is named 'squared_error')
        self.model = RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
           max_features='auto', max_leaf_nodes=None,
           min_impurity_decrease=0.0, min_impurity_split=None,
           min_samples_leaf=1, min_samples_split=4,
           min_weight_fraction_leaf=0.0, n_estimators=200, n_jobs=None,
           oob_score=False, random_state=100, verbose=0, warm_start=False)
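# A minimal evaluation sketch for the model above, assuming scikit-learn is
# available; evaluate() is a hypothetical helper, not part of the original class.
from sklearn.model_selection import cross_val_score

def evaluate(model, samples, labels):
    # 5-fold cross-validated R^2 of the random forest on the full dataset,
    # e.g. evaluate(self.model, self.samples['full'], self.labels['full'])
    scores = cross_val_score(model, samples, labels, cv=5)
    return scores.mean()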
Example 6
    def __init__(self, config):
        self.z_dim = config["train"]["z-dim"]
        self.c_dim = config["train"]["c-dim"]
        self.x_dim = config["train"]["x-dim"]
        self.y_dim = config["train"]["y-dim"]
        self.scale = config["train"]["scale"]

        self.imchannels = config["train"]["imchannels"]
        self.imshape = (self.x_dim, self.y_dim, self.imchannels)

        self.dataloader = Dataloader(config)
        self.num_classes = config["train"]["num-classes"]

        optimizer = Adam(0.0002, 0.5)  # learning_rate=2e-4, beta_1=0.5
        losses = ['binary_crossentropy', 'sparse_categorical_crossentropy']

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss=losses,
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        self.generator = self.build_generator(self.x_dim, self.y_dim)

        self.combined = self.build_combined()
        self.combined.compile(loss=losses, optimizer=optimizer)
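# A hedged sketch of what build_combined() might look like given the two-loss
# compile above (AC-GAN style: a validity output and a class output); the
# layer wiring and input shape are assumptions, not the original implementation.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

def build_combined(self):
    noise = Input(shape=(self.z_dim,))
    img = self.generator(noise)
    # freeze the discriminator while the generator trains through the stack
    self.discriminator.trainable = False
    valid, target_label = self.discriminator(img)
    return Model(noise, [valid, target_label])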
Example 7
def main():

    train_start = time.time()
    print("TRAINING")
    Params = namedtuple('Params', [
        'batch_size', 'embed_size', 'rnn_size', 'alignment_size', 'alpha',
        'phase', 'num_epoch'
    ])

    data = Dataloader('data/hansard/train.fr', 'data/hansard/train.en',
                      'data/hansard/word2idx.fr', 'data/hansard/word2idx.en')
    mparams = Params(50, 128, 256, 256, 1e-3, 'TRAIN', 5)
    with tf.Graph().as_default():
        RNNsearch(data, mparams).train()

    train_duration = time.time() - train_start
    print("--- %s seconds ---" % train_duration)

    print("TEST")
    test_start = time.time()
    data.read_data('data/hansard/dev.fr', 'data/hansard/dev.en')
    mparams = Params(50, 128, 256, 256, 1e-3, 'TEST', 1)
    with tf.Graph().as_default():
        RNNsearch(data, mparams).test()

    # time the test phase from its own start, not from the training duration
    print("--- %s seconds ---" % (time.time() - test_start))
Example 8
def train(datalimit=0):
    # Loading data
    dataloader = Dataloader(datalimit)
    train_texts, train_classes, test_texts, test_classes = dataloader.load_all_datasets()
    #train_texts, train_classes, test_texts, test_classes = dataloader.load_small_fake_data()

    model = CombinedClassifier()
    model.train_and_save(train_texts, train_classes)
    model.eval(test_texts, test_classes)
Example 9
def main(config, difficulty, type):  # note: 'type' shadows the builtin
    logger = config.get_logger('train')

    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    batch_size = config['data_loader']['args']['batch_size']
    tgt_preprocessing = None
    src_preprocessing = None
    train_loader = Dataloader(
        device=device, difficulty=difficulty, type=type,
        src_preprocessing=src_preprocessing, tgt_preprocessing=tgt_preprocessing,
        batch_size=batch_size)

    valid_loader = Dataloader(
        device=device, difficulty=difficulty, type=type,
        src_preprocessing=src_preprocessing, tgt_preprocessing=tgt_preprocessing,
        batch_size=batch_size, train=False)
    
    model_args = config['arch']['args']
    model_args.update({
        'src_vocab': train_loader.src_vocab,
        'tgt_vocab': train_loader.tgt_vocab,
        'sos_tok': SOS_TOK,
        'eos_tok': EOS_TOK,
        'pad_tok': PAD_TOK,
        'device': device
    })
    model = getattr(models, config['arch']['type'])(**model_args)
    weight = torch.ones(len(train_loader.tgt_vocab))
    criterion = AvgPerplexity(
        ignore_idx=train_loader.tgt_vocab.stoi[PAD_TOK],
        weight=weight)

    criterion.to(device)

    optimizer = get_optimizer(
        optimizer_params=filter(
            lambda p: p.requires_grad, model.parameters()),
        args_dict=config['optimizer'])

    metrics_ftns = [Accuracy(
        train_loader.tgt_vocab.stoi[PAD_TOK])]
    # for param in model.parameters():
    #     param.data.uniform_(-0.08, 0.08)
    trainer = Trainer(
        model=model,
        criterion=criterion,
        metric_ftns=metrics_ftns,
        optimizer=optimizer,
        config=config,
        data_loader=train_loader,
        valid_data_loader=valid_loader,
        log_step=1, len_epoch=200
    )
    trainer.train()
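# Hypothetical sketch of the get_optimizer() helper used above, assuming
# config['optimizer'] holds an optimizer type name plus keyword arguments;
# the project's real helper may differ.
import torch.optim as optim

def get_optimizer(optimizer_params, args_dict):
    args = dict(args_dict)
    opt_cls = getattr(optim, args.pop('type'))  # e.g. 'Adam' -> optim.Adam
    return opt_cls(optimizer_params, **args)
Example 10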
def do_test(loadPath):
    print('Loading model from: %s' % loadPath)
    # pickle files must be opened in binary mode
    with open(loadPath, 'rb') as fileId:
        loaded = pickle.load(fileId)

    #------------------------------------------------------------------------
    # build dataset, load agents
    #------------------------------------------------------------------------
    params = loaded['params']
    data = Dataloader(params)

    team = Team(params)
    team.loadModel(loaded)
    team.evaluate()
    #------------------------------------------------------------------------
    # test agents
    #------------------------------------------------------------------------
    dtypes = ['train']
    for dtype in dtypes:
        # evaluate on the train dataset, using greedy policy
        images, tasks, labels = data.getCompleteData(dtype)
        # forward pass
        preds, _, talk, talk_list = team.forward(Variable(images), Variable(tasks), True)

        options = dict()
        options['qOutVocab'] = 3
        options['aOutVocab'] = 4
        m1, m2, ic1, ic2, h1, h2 = all_metrics(team, preds, talk_list, options)

        # compute matches for the first, second and both attributes
        firstMatch = preds[0].data == labels[:, 0].long()
        secondMatch = preds[1].data == labels[:, 1].long()
        matches = firstMatch & secondMatch
        atleastOne = firstMatch | secondMatch

        # compute accuracy
        firstAcc = 100 * torch.mean(firstMatch.float())
        secondAcc = 100 * torch.mean(secondMatch.float())
        atleastAcc = 100 * torch.mean(atleastOne.float())
        accuracy = 100 * torch.mean(matches.float())
        print('\nOverall accuracy [%s]: %.2f (f: %.2f s: %.2f, atleast: %.2f)'
              % (dtype, accuracy, firstAcc, secondAcc, atleastAcc))

        # pretty print
        talk = data.reformatTalk(talk, preds, images, tasks, labels)
        if 'final' in loadPath:
            savePath = loadPath.replace('final', 'chatlog-' + dtype)
        elif 'inter' in loadPath:
            savePath = loadPath.replace('inter', 'chatlog-' + dtype)
        savePath = savePath.replace('pickle', 'json')
        print('Saving conversations: %s' % savePath)
        with open(savePath, 'w') as fileId:
            json.dump(talk, fileId)
        saveResultPage(savePath)

        res1 = accuracy, firstAcc, secondAcc, atleastAcc
        res2 = m1, m2, ic1, ic2, h1, h2
        # note: returns after the first dtype
        return res1, res2
Example 11
    def test(self, args):
        print("\n [*] Testing....")

        if not os.path.exists(args.output_path):
            os.makedirs(args.output_path)

        W = tf.placeholder(tf.int32)
        H = tf.placeholder(tf.int32)

        self.saver = tf.train.Saver()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        self.sess.run(init_op)

        if args.checkpoint_path != '':
            self.saver.restore(self.sess, args.checkpoint_path)
            print(" [*] Load model: SUCCESS")
        else:
            print(" [*] Load failed... no checkpoint given")
            print(" [*] End Testing...")
            raise ValueError('args.checkpoint_path is empty')

        dataloader = Dataloader(file=args.dataset_testing)
        left_files, gt_files = dataloader.read_list_file()
        self.prediction_resized = tf.nn.sigmoid(
            tf.image.resize_image_with_crop_or_pad(image=self.prediction,
                                                   target_height=H,
                                                   target_width=W))

        print(" [*] Start Testing...")
        for i, item in enumerate(left_files):
            print(" [*] Loading test image:" + left_files[i])
            disp_patches = dataloader.get_testing_image(left_files[i])
            batch_disp = self.sess.run([disp_patches])
            shape = batch_disp[0].shape
            disp_patches = tf.image.resize_image_with_crop_or_pad(
                image=disp_patches,
                target_height=self.image_height,
                target_width=self.image_width)
            batch_disp = self.sess.run([disp_patches])
            start = time.time()
            prediction = self.sess.run(self.prediction_resized,
                                       feed_dict={
                                           self.disp: batch_disp,
                                           H: shape[0],
                                           W: shape[1]
                                       })
            current = time.time()
            confmap_png = tf.image.encode_png(
                tf.cast(tf.scalar_mul(65535.0, tf.squeeze(prediction, axis=0)),
                        dtype=tf.uint16))
            output_file = args.output_path + left_files[i].strip().split(
                '/')[-1]
            self.sess.run(tf.write_file(output_file, confmap_png))
            print(" [*] CCNN confidence prediction saved in:" + output_file)
            print(" [*] CCNN running time:" + str(current - start) + "s")
Example 12
    def non_cluster_pytorchnet(pop, args):
        for i in range(len(pop)):
            torch.manual_seed(args.manual_seed)

            # Create Model
            models = Model(args, pop[i].genome)
            model, criterion, num_params = models.setup()
            model = calculate_flops.add_flops_counting_methods(model)

            # Data Loading
            dataloader = Dataloader(args)
            loaders = dataloader.create()

            # The trainer handles the training loop
            trainer = Trainer(args, model, criterion)
            # The tester handles the evaluation on the validation set
            tester = Tester(args, model, criterion)

            # start training !!!
            acc_test_list = []
            acc_best = 0
            train_time_start = time.time()
            for epoch in range(args.nepochs):
                # train for a single epoch

                if epoch == 0:
                    model.start_flops_count()
                loss_train, acc_train = trainer.train(epoch, loaders)
                loss_test, acc_test = tester.test(epoch, loaders)
                acc_test_list.append(acc_test)

                if epoch == 0:
                    n_flops = (model.compute_average_flops_cost() / 1e6 / 2)
                # update the best test accuracy found so far
                if acc_test > acc_best:
                    acc_best = acc_test

                # print("Epoch {}, train loss = {}, test accu = {}, best accu = {}, {} sec"
                #       .format(epoch, np.average(loss_train), acc_test, acc_best, time_elapsed))

                if np.isnan(np.average(loss_train)):
                    break

            # end of training
            time_elapsed = np.round((time.time() - train_time_start), 2)
            pop[i].fitness[0] = 100.0 - np.mean(acc_test_list[-3:])
            pop[i].fitness[1] = n_flops
            pop[i].n_params = num_params
            pop[i].n_FLOPs = n_flops
            print(
                "Indv {:d}: test error={:0.2f}, FLOPs={:0.2f}M, n_params={:0.2f}M, {:0.2f} sec"
                .format(i, pop[i].fitness[0], n_flops, num_params / 1e6,
                        time_elapsed))

        return
Example 13
    def __init__(self, config):
        # Configure data loader
        self.dataloader = Dataloader(config)
        self.dataloader.load()

        self.save_path = os.path.join(config["paths"]["save"], config["run-title"])


        self.d_c_stream = load_model(config["paths"]["load-d-c-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})
        self.d_r_stream = load_model(config["paths"]["load-d-r-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})
        self.e_c_stream = load_model(config["paths"]["load-e-c-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})
Example 14
def test(path):
    model.load_weights(save_model)
    dataloader = Dataloader(path)
    files = os.listdir(path)
    for file in files:
        test_data = dataloader.load_predict_data(os.path.join(path, file),
                                                 input_size=(H, W))
        time_start = time.time()
        result = model.predict(test_data)
        time_end = time.time()
        pred = np.argmax(result, axis=1)
        print('classes : %s \t cost : %04f s' % (pred, time_end - time_start))
Example 15
    def test(self, args):
        print("[*] Testing....")

        if not os.path.exists(args.output_path):
            os.makedirs(args.output_path)

        self.saver = tf.train.Saver()

        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())

        if args.checkpoint_path != '':
            self.saver.restore(self.sess, args.checkpoint_path)
            print(" [*] Load model: SUCCESS")
        else:
            print(" [*] Load failed... no checkpoint given")
            print(" [*] End Testing...")
            raise ValueError('args.checkpoint_path is empty')

        dataloader = Dataloader(file=args.dataset_testing,
                                isTraining=self.isTraining)
        disp_batch = dataloader.disp
        line = dataloader.disp_filename
        num_samples = dataloader.count_text_lines(args.dataset_testing)

        prediction = tf.pad(
            tf.nn.sigmoid(self.prediction),
            tf.constant([[0, 0], [
                self.radius,
                self.radius,
            ], [self.radius, self.radius], [0, 0]]), "CONSTANT")
        png = tf.image.encode_png(
            tf.cast(tf.scalar_mul(65535.0, tf.squeeze(prediction, axis=0)),
                    dtype=tf.uint16))

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        print(" [*] Start Testing...")
        for step in range(num_samples):
            batch, filename = self.sess.run([disp_batch, line])
            print(" [*] Test image:" + filename)
            start = time.time()
            confidence = self.sess.run(png, feed_dict={self.disp: batch})
            current = time.time()
            output_file = args.output_path + filename.strip().split('/')[-1]
            self.sess.run(tf.write_file(output_file, confidence))
            print(" [*] CCNN confidence prediction saved in:" + output_file)
            print(" [*] CCNN running time:" + str(current - start) + "s")

        coord.request_stop()
        coord.join(threads)
Example 16
def main():
    version_name = 'v1_25dBm_evaluation'
    info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_' + version_name
    print(info)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #device = 'cpu'
    print(device)

    t = 5
    batch_size = 16

    print('batch_size:%d' % (batch_size))

    acur_eval = []
    loss_eval = []
    BL_eval = np.zeros((10, 5, t))
    rank_eval = np.zeros((10, 64, t))
    pdf_eval = np.zeros((10, 101, t))

    for tt in range(t):
        print('Train %d times' % (tt))
        model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_' + str(
            tt) + '_MODEL.pkl'
        model = torch.load(model_name)
        model.to(device)
        model.eval()

        eval_loader_name = '/usr/mk/TCOM/dataset/testing_25dBm'
        eval_loader = Dataloader(path=eval_loader_name,
                                 batch_size=batch_size,
                                 device=device)
        eval_loader.reset()
        acur, losses, rank, BL, pdf = eval(model, eval_loader, device)
        acur_eval.append(acur)
        loss_eval.append(losses)
        rank_eval[:, :, tt] = np.squeeze(rank)
        BL_eval[:, :, tt] = np.squeeze(BL)
        pdf_eval[:, :, tt] = np.squeeze(pdf)

        mat_name = info + '.mat'
        sio.savemat(
            mat_name, {
                'acur_eval': acur_eval,
                'loss_eval': loss_eval,
                'rank_eval': rank_eval,
                'BL_eval': BL_eval,
                'pdf_eval': pdf_eval
            })
Example 17
def sample():
    """ test on all test data and random sample 1 hair to visualize """

    dataloader = Dataloader(args.data_dir, 0)
    test_x, test_y, angles = dataloader.get_test_data()
    n_tests = test_y.shape[0]

    # random permutation
    # order = np.random.permutation(test_y.shape[0])
    # test_x, test_y, angles = test_x[order], test_y[order], angles[order]

    config = tf.ConfigProto(device_count={'GPU': 1}, allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        model = create_model(sess)
        # start testing
        loss, pos_err = 0, 0
        best_loss, best_err = 10, 10
        best_pos, idx = None, 0  # best_pos is set once a step beats best_loss
        for i in range(n_tests):
            enc_in, dec_out = np.expand_dims(test_x[i], 0), np.expand_dims(
                test_y[i], 0)  # input must be [?, 32, 32, 500]
            pos, curv, step_loss = model.step(sess, enc_in, dec_out, False)
            step_pos_err = evaluate_pos(pos[0], test_y[i, ..., 100:400],
                                        test_y[i, ..., :100])
            loss += step_loss
            pos_err += step_pos_err
            if step_loss < best_loss:
                idx = i
                best_loss = step_loss
                best_err = step_pos_err
                best_pos = pos

    # re_pos = reconstruction(pos, curv)
    # pos_re_err = evaluate_pos(re_pos, test_y[..., 100:400], test_y[..., :100])
    # print('position error after reconstruction: %.4e' % pos_re_err)
        print('==================================\n'
              'total loss avg:            %.4f\n'
              'position error avg(m):     %.4f\n'
              '==================================' %
              (loss / n_tests, pos_err / n_tests))
        print('best')
        print('==================================\n'
              'total loss avg:            %.4f\n'
              'position error avg(m):     %.4f\n'
              '==================================' % (best_loss, best_err))
    # choose the last one
    visualize(args.data_dir, test_x[idx], test_y[idx, ..., 100:400],
              best_pos[0], angles[idx])
Example 18
def main():
    version_name = 'v1_k=7_velocity'
    info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_' + version_name
    print(info)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    t = 5
    batch_size = 16

    print('batch_size:%d' % (batch_size))

    acur_eval = []
    loss_eval = []
    BL_eval = np.zeros((10, 5, t))
    rank_eval = np.zeros((10, 64, 5, t))

    for tt in range(t):
        print('Train %d times' % (tt))
        model_name = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_k=7_' + str(
            tt) + '_MODEL.pkl'
        model = torch.load(model_name)
        model.to(device)
        model.eval()
        count = 0

        # evaluate the performance under different UE velocities
        for v in range(10, 60, 10):
            eval_loader_name = '/usr/mk/TCOM/dataset/velocity_' + str(v)
            eval_loader = Dataloader(path=eval_loader_name,
                                     batch_size=batch_size,
                                     device=device)
            eval_loader.reset()
            acur, losses, rank, BL = eval(model, eval_loader, device)
            acur_eval.append(acur)
            loss_eval.append(losses)
            rank_eval[:, :, count, tt] = np.squeeze(rank)
            BL_eval[:, count, tt] = np.squeeze(BL)
            count += 1

            mat_name = info + '.mat'
            sio.savemat(
                mat_name, {
                    'acur_eval': acur_eval,
                    'loss_eval': loss_eval,
                    'rank_eval': rank_eval,
                    'BL_eval': BL_eval
                })
Example 19
def main(argv):
    net = Network()

    loader = Dataloader('/home/krishneel/Documents/datasets/vot/vot2014/', 'list.txt', net.get_input_shape)

    for i in range(100):
        model = net.build(loader, verbose=True)
Example 20
def predict(path):
    '''Use the FCN to predict a segmentation for one image.'''
    model = FCN16_test(config)
    data_loader = Dataloader('val', config)

    saver = tf.train.Saver()
    ckpt = './models/FCN16_adam_iter_5000.ckpt'
    dump_path = './dataset/demo/'
    if not os.path.exists(dump_path):
        os.makedirs(dump_path)

    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True)) as session:
        saver.restore(session, ckpt)
        print('Model restored.')
        im = cv2.imread(path)
        im = cv2.resize(im, (640, 640))
        im2 = np.expand_dims(im, 0)
        feed_dict = {model.img: im2}

        pred = session.run(model.get_output('deconv'), feed_dict=feed_dict)

        annotated_label = np.argmax(pred[0], axis=2)

        return im, annotated_label
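# Hypothetical follow-up showing how the returned (image, label map) pair
# might be inspected; file names are illustrative, and cv2/np refer to the
# OpenCV and NumPy imports the snippet already relies on.
im, annotated_label = predict('./dataset/demo/test_image.jpg')
color = cv2.applyColorMap((annotated_label * 10).astype(np.uint8), cv2.COLORMAP_JET)
overlay = cv2.addWeighted(im, 0.6, color, 0.4, 0)
cv2.imwrite('./dataset/demo/overlay.png', overlay)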
Example 21
def initialize_training(data_root,
                        meta_text,
                        checkpoint_dir=None,
                        model_name=None):

    dataloader = Dataloader(data_root, meta_text)

    model = Tacotron(n_vocab=len(symbols),
                     embedding_dim=config.embedding_dim,
                     mel_dim=config.num_mels,
                     linear_dim=config.num_freq,
                     r=config.outputs_per_step,
                     padding_idx=config.padding_idx,
                     attention=config.attention,
                     use_mask=config.use_mask)

    optimizer = optim.Adam(model.parameters(),
                           lr=config.initial_learning_rate,
                           betas=(config.adam_beta1, config.adam_beta2),
                           weight_decay=config.weight_decay)

    # Load checkpoint
    if model_name is not None:
        model, optimizer = warm_from_ckpt(checkpoint_dir, model_name, model,
                                          optimizer)

    return model, optimizer, dataloader
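# Hypothetical sketch of warm_from_ckpt(), assuming checkpoints were saved as
# a dict holding "state_dict" and "optimizer" entries; the project's real
# helper may differ.
import os
import torch

def warm_from_ckpt(checkpoint_dir, model_name, model, optimizer):
    ckpt = torch.load(os.path.join(checkpoint_dir, model_name), map_location="cpu")
    model.load_state_dict(ckpt["state_dict"])
    optimizer.load_state_dict(ckpt["optimizer"])
    return model, optimizer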
Example 22
    def build_model(self):
        dataloader = Dataloader(self.args)
        if self.args.phase == 'train':
            self.iter_A, self.iter_B = iter(dataloader.loader_A), iter(
                dataloader.loader_B)

            self.Ga, self.Gb = self.generator(), self.generator()
            self.Da, self.Db = self.discriminator(), self.discriminator()

            boundaries = []
            values = [self.args.lr]
            for i in range(self.args.decay_epochs):
                boundaries.append(self.args.iteration * (self.args.epochs + i))
                values.append(self.args.lr -
                              i * self.args.lr / self.args.decay_epochs)

            lr = tk.optimizers.schedules.PiecewiseConstantDecay(
                boundaries, values)
            self.optimizer_g = tk.optimizers.Adam(learning_rate=lr,
                                                  beta_1=0.5,
                                                  beta_2=0.999)
            self.optimizer_d = tk.optimizers.Adam(learning_rate=lr,
                                                  beta_1=0.5,
                                                  beta_2=0.999)

            self.summary_writer = tf.summary.create_file_writer(
                self.args.log_dir)

        elif self.args.phase == 'test':
            self.A_loader, self.B_loader = dataloader.A_loader, dataloader.B_loader
            self.N_A = self.A_loader.reduce(0, lambda x, _: x + 1)
            self.N_B = self.B_loader.reduce(0, lambda x, _: x + 1)
            self.load()
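# Worked example of the schedule built above, under hypothetical settings
# iteration=1000, epochs=10, decay_epochs=5, lr=2e-4: the learning rate holds
# at 2e-4 for the first 10000 steps, then steps down linearly every 1000 steps:
#   boundaries = [10000, 11000, 12000, 13000, 14000]
#   values     = [2e-4, 2e-4, 1.6e-4, 1.2e-4, 8e-5, 4e-5]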
Example 23
    def getmetadata(self):
        args = self.args
        #args.dataset_train = 'metalist'
        args.dataset_train = 'metadata'
        args.loader_train = 'h5pymeta'
        dataloader = Dataloader(args)
        loader = dataloader.create(flag="Test")
        data_iter = iter(loader)
        i = 0
        input_metadata = []
        while i < len(loader):
            i += 1
            # Python 3: use next(iterator) instead of iterator.next()
            composition, metadata = next(data_iter)
            input_metadata.append(metadata)
        return input_metadata
Example 24
def main():
    # parse the arguments
    args = config.parse_args()
    if args.ngpu > 0 and torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"
    args.device = torch.device(device)
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    if args.save_results:
        utils.saveargs(args)

    # initialize the checkpoint class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion, evaluation = models.setup(checkpoints)

    print('Model:\n\t{model}\nTotal params:\n\t{npar:.2f}M'.format(
        model=args.model_type,
        npar=sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Data Loading
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop
    trainer = Trainer(args, model, criterion, evaluation)
    # The tester handles the evaluation on the validation set
    tester = Tester(args, model, criterion, evaluation)

    # start training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):
        print('\nEpoch %d/%d\n' % (epoch + 1, args.nepochs))

        # train for a single epoch
        loss_train = trainer.train(epoch, loaders)
        loss_test = tester.test(epoch, loaders)

        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            if args.save_results:
                checkpoints.save(epoch, model, model_best)
Example 25
    def __init__(self, args):
        self.args = args
        self.num_scale, self.imgs, self.sizes = Dataloader(
            args).get_multi_scale_imgs_and_sizes()

        for i in range(self.num_scale):
            self.imgs[i] = tf.constant(self.imgs[i], 'float32')
            self.sizes[i].append(self.args.img_nc)
Example 26
class HFCycleGAN:
    def __init__(self, config):
        # Configure data loader
        self.dataloader = Dataloader(config)
        self.dataloader.load()

        self.save_path = os.path.join(config["paths"]["save"], config["run-title"])


        self.d_c_stream = load_model(config["paths"]["load-d-c-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})
        self.d_r_stream = load_model(config["paths"]["load-d-r-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})
        self.e_c_stream = load_model(config["paths"]["load-e-c-stream"], custom_objects={"InstanceNormalization": InstanceNormalization})

    def sample(self, index):
        r, c = 2, 2
        batch_size = 1
        
        imgs_A, imgs_B = self.dataloader.load_data(batch_size=batch_size, is_testing=True) 

        imgs_A_rand = self.dataloader.load_data_by_domain("A", batch_size=batch_size, is_testing=True)

        # Translate images to the other domain
        fake_B = self.d_c_stream.predict_on_batch(imgs_A)
        res_A  = self.d_r_stream.predict_on_batch(imgs_A_rand)

        fake_A = self.e_c_stream.predict_on_batch([imgs_B, res_A[0], res_A[1], res_A[2]])

        gen_imgs = np.concatenate([imgs_A, fake_B, imgs_B, fake_A]).squeeze()

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        titles = ['Original', 'Translated']

        count = 0
        plt.switch_backend('agg')
        for batch in range(batch_size):
            fig, axs = plt.subplots(r, c)
            for i in range(r):
                for j in range(c):
                    axs[i,j].imshow(gen_imgs[count])
                    axs[i,j].set_title(titles[j])
                    axs[i,j].axis('off')
                    count += 1
            fig.savefig(os.path.join(self.save_path, f"sample_{index}_{batch}.png"))
            plt.close()
Example 27
def main():
    # Parse the Arguments
    args = config.parse_args()
    random.seed(args.manual_seed)
    tf.set_random_seed(args.manual_seed)
    now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S/')
    args.save = os.path.join(args.result_path, now, 'save')
    args.logs = os.path.join(args.result_path, now, 'logs')
    if args.save_results:
        utils.saveargs(args)

    # Initialize the Checkpoints Class
    checkpoints = Checkpoints(args)

    # Create Model
    models = Model(args)
    model, criterion, evaluation = models.setup(checkpoints)

    # Print Model Summary
    print('Model summary: {}'.format(model.name))
    print(model.summary())

    # Data Loading
    dataloader_obj = Dataloader(args)
    dataloader = dataloader_obj.create()

    # Initialize Trainer and Tester
    trainer = Trainer(args, model, criterion, evaluation)
    tester = Tester(args, model, criterion, evaluation)

    # Start Training !!!
    loss_best = 1e10
    for epoch in range(args.nepochs):
        print('\nEpoch %d/%d' % (epoch + 1, args.nepochs))

        # Train and Test for a Single Epoch
        loss_train = trainer.train(epoch, dataloader["train"])
        loss_test = tester.test(epoch, dataloader["test"])

        if loss_best > loss_test:
            model_best = True
            loss_best = loss_test
            if args.save_results:
                checkpoints.save(epoch, model, model_best)
Example 28
if __name__ == "__main__":
    data_loader = Dataloader()
    # data_loader.get_and_save_stations()
    # data_loader.calculate_total_lateness()

    # load from disk
    df = data_loader.load_total_lateness()
    lm = Lateness_map()
    lm.init_map(df)
Example 29
    def __init__(self, dataset_dir, log_dir, generator_channels,
                 discriminator_channels, nz, style_depth, lrs, betas, eps,
                 phase_iter, batch_size, n_cpu, opt_level):
        self.nz = nz
        self.dataloader = Dataloader(dataset_dir, batch_size, phase_iter * 2,
                                     n_cpu)

        self.generator = cuda(
            DataParallel(Generator(generator_channels, nz, style_depth)))
        self.discriminator = cuda(
            DataParallel(Discriminator(discriminator_channels)))

        self.tb = tensorboard.tf_recorder('StyleGAN', log_dir)

        self.phase_iter = phase_iter
        self.lrs = lrs
        self.betas = betas

        self.opt_level = opt_level
Example 30
    def __init__(
        self,
        mode='all',
        config_file='./crosswoz/configs/crosswoz_all_context.json',
        model_file='https://convlab.blob.core.windows.net/convlab-2/bert_crosswoz_all_context.zip'
    ):
        assert mode in ('usr', 'sys', 'all')
        # config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'configs/{}'.format(config_file))
        config = json.load(open(config_file))
        DEVICE = config['DEVICE']
        data_dir = config['data_dir']
        output_dir = config['output_dir']

        # root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # data_dir = os.path.join(root_dir, config['data_dir'])
        # output_dir = os.path.join(root_dir, config['output_dir'])

        if not os.path.exists(os.path.join(data_dir, 'intent_vocab.json')):
            preprocess(mode)

        intent_vocab = json.load(
            open(os.path.join(data_dir, 'intent_vocab.json'),
                 encoding='utf-8'))
        tag_vocab = json.load(
            open(os.path.join(data_dir, 'tag_vocab.json'), encoding='utf-8'))
        dataloader = Dataloader(intent_vocab=intent_vocab,
                                tag_vocab=tag_vocab,
                                pretrained_weights=os.path.join(
                                    "./crosswoz",
                                    config['model']['pretrained_weights']))

        print('intent num:', len(intent_vocab))
        print('tag num:', len(tag_vocab))

        best_model_path = os.path.join(output_dir, 'pytorch_model.bin')
        if not os.path.exists(best_model_path):
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            print('Load from model_file param')
            archive_file = cached_path(model_file)
            archive = zipfile.ZipFile(archive_file, 'r')
            archive.extractall(output_dir)
            archive.close()
        print('Load from', best_model_path)
        model = JointBERT(config['model'], DEVICE, dataloader.tag_dim,
                          dataloader.intent_dim)
        model.load_state_dict(
            torch.load(os.path.join(output_dir, 'pytorch_model.bin'), DEVICE))
        model.to(DEVICE)
        model.eval()

        self.model = model
        self.dataloader = dataloader
        print("BERTNLU loaded")