Example #1
    def train(self):
        """
		Performs the training process given the configuration passed in
		"""
        if self.inference:
            raise ValueError("Tried to train a model in inference mode.")
        xent_epochs = self.config.max_epochs
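        # Build the Trainer with the save callback, the epoch limit, and the
        # early-stopping patience (max_stalled_steps).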
        trainer = training.Trainer(
            self.save_fn,
            max_epochs=xent_epochs,
            max_stalled_steps=self.config.max_stalled_steps)

        with tf.Session() as sess:
            if self.model_load:
                self.train_checkpoint.restore(
                    self.model_load).assert_consumed().run_restore_ops()
                warnings.warn(
                    "You are reloading a model for training. This feature"
                    " is still not fully implemented. It restores the state"
                    " of the model variables and optimizer but not the number"
                    " of stalled steps, the validation cost record, or the"
                    " state of the shuffled corpora")
                print("Restored model at {}".format(self.model_load))
            else:
                sess.run(tf.global_variables_initializer())
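            # Run the main training loop; validation uses the inference feeds
            # and the configured batch sizes.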
            training.training_loop(
                sess,
                self.model,
                trainer,
                self.data,
                self.train_feeds,
                self.infer_feeds,
                train_batch_size=self.config.train_batch_size,
                valid_batch_size=self.config.infer_batch_size,
                min_epochs_before_validation=1)
Example #2
def main():
    """Training from scratch runs and terminates"""
    loader = dataloader.AmyloidDataloader()
    model = finetuned_convnet.FinetunedConvnet(model_config="new",
                                               use_cuda=True)
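    # Two epochs is enough to verify that training runs end to end.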
    trainer = training.Trainer(model, loader, use_cuda=True, num_epochs=2)

    model = trainer.train()
Example #3
def main():
    # full_input = ann_io.read_digit_file("../a1digits/digit_train_0.txt")
    training_file_paths = [
        "../../a1digits/digit_train_0.txt", "../../a1digits/digit_train_1.txt",
        "../../a1digits/digit_train_2.txt", "../../a1digits/digit_train_3.txt",
        "../../a1digits/digit_train_4.txt", "../../a1digits/digit_train_5.txt",
        "../../a1digits/digit_train_6.txt", "../../a1digits/digit_train_7.txt",
        "../../a1digits/digit_train_8.txt", "../../a1digits/digit_train_9.txt"
    ]

    testing_file_paths = [
        "../../a1digits/digit_test_0.txt", "../../a1digits/digit_test_1.txt", "../../a1digits/digit_test_2.txt",
        "../../a1digits/digit_test_3.txt", "../../a1digits/digit_test_4.txt", "../../a1digits/digit_test_5.txt",
        "../../a1digits/digit_test_6.txt", "../../a1digits/digit_test_7.txt", "../../a1digits/digit_test_8.txt",
        "../../a1digits/digit_test_9.txt"
    ]

    training_set = ann_io.format_data(training_file_paths, config.random)
    testing_set = ann_io.format_data(testing_file_paths)

    # ANN parameters
    activator = my_math.sigmoid
    epochs = 32
    epsilon = 1e-6  # determines when the algorithm should stop early
    k = 10

    hidden_layer_args = [57]
    momentum = 0.9
    learning_rate = 0.5

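    # 64 input units, one hidden layer of 57 units, and k = 10 outputs (digits 0-9).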
    ann = FeedForwardNetwork(64, hidden_layer_args, k, momentum, learning_rate)
    trainer = training.Trainer()
    # using holdout
    trainer.run(ann, training_set, epochs, epsilon, activator)
    trainer.test(ann, testing_set)

    # name files based on parameters
    common_str = "_%de_%dn_%.2fm_%.2flr_%s.csv" % (epochs, hidden_layer_args[0], momentum, learning_rate, activator.__name__)
    training_filename = "trn" + common_str
    validation_filename = "vld" + common_str
    testing_filename = "tes" + common_str
    # write output to files
    ann_io.write_to_file(training_filename, trainer.get_results("training"))
    ann_io.write_to_file(validation_filename, trainer.get_results("validation"))
    ann_io.write_to_file(testing_filename, trainer.get_results("testing"))
Example #4
def generate(collection, corpus, xmls):
    print ""
    logger.status("Starting analysis.")
    trainer = training.Trainer(collection, corpus, xmls)
    trainer.run()
    logger.status("Finished analysis.")
    logger.status("Starting generation.")
    generator = generation.Generator(trainer.results)

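    # Keep generating songs until the user enters something other than `a`.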
    again = 'a'
    while again == 'a':
        song = generator.generate()

        # add more choices if you like ;)
        addendum = [
            '2010', 'reloaded', 'interpretation', '(unreleased)', 'ringtone',
            'brand new!', 'Masterpiece', 'Magnum Opus', 'demo', 'hidden track',
            'new album pre-release'
        ]

        # insert metadata:
        song.insert(music21.metadata.Metadata())
        song.metadata.title = '%s %s %s' % (collection.upper(),
                                            corpus.capitalize(),
                                            str(random.choice(addendum)))
        song.metadata.composer = 'Practical Music Listener Pleasing'
        # copyright doesn't seem to work this way
        #song.metadata.Copyright = "test"

        try:
            song.write(
                'musicxml', '%s_%s_%s.xml' %
                (collection, corpus, datetime.datetime.now().isoformat()))
        except Exception:
            # If the score cannot be written, generate another song.
            continue
        try:
            song.show()
        except music21.environment.EnvironmentException:
            logger.journal(
                "Couldn't use show method to display generated score.")
        again = input(
            "  `a` to create another, anything else to exit to menu > ")
Example #5
    def train(self, train_affect=False):
        """
		Performs the training process given the configuration passed in

		:param bool train_affect: Whether to train using cross-entropy or affective loss
		"""
        if self.inference:
            raise ValueError("Tried to train a model in inference mode.")
        xent_epochs = self.config.max_epochs

        trainer = training.Trainer(
            self.save_fn,
            max_epochs=xent_epochs,
            max_stalled_steps=self.config.max_stalled_steps)

        with tf.Session() as sess:
            if self.model_load:
                warnings.warn(
                    "You are reloading a model for training. This feature"
                    " is still not fully implemented. It restores the state"
                    " of the model variables and optimizer but not the number"
                    " of stalled steps, the validation cost record, or the"
                    " state of the shuffled corpora")
                self.train_checkpoint.restore(
                    self.model_load).assert_consumed().run_restore_ops()
                print("Restored model at {}".format(self.model_load))
            else:
                sess.run(tf.global_variables_initializer())

            training.training_loop(
                sess,
                self.model,
                trainer,
                self.data,
                self.train_feeds,
                self.infer_feeds,
                train_batch_size=self.config.train_batch_size,
                valid_batch_size=self.config.infer_batch_size,
                min_epochs_before_validation=1)

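            # Optionally continue with an affective-loss phase for roughly a
            # quarter of the completed epochs (at least one), carrying over the
            # epoch count, saver, and best validation cost.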
            if train_affect:
                affect_epochs = (trainer.epochs_completed //
                                 4) + 1 * (trainer.epochs_completed < 4)
                total_epochs = trainer.epochs_completed + affect_epochs
                self.train_feeds[self.model.train_affect] = True
                print(
                    "Switching from cross-entropy to maximum affective content . . ."
                )

                affect_trainer = training.Trainer(
                    self.checkpoint_best,
                    self.checkpoint_latest,
                    self.save_fn,
                    epochs_completed=trainer.epochs_completed,
                    max_epochs=total_epochs,
                    saver=trainer.saver,
                    best_valid_cost=trainer.best_valid_cost)
                training.training_loop(
                    sess,
                    self.model,
                    affect_trainer,
                    self.data,
                    self.train_feeds,
                    self.infer_feeds,
                    train_batch_size=self.config.train_batch_size,
                    valid_batch_size=self.config.infer_batch_size)
Example #6
# Instantiate a loss function
loss_fn = losses.threeD_loss

#### build model ####

vae = vap.VAEparticle(input_shape=input_shape,
                      z_sz=params.z_sz,
                      filter_ini_n=6,
                      kernel_sz=3)
model = vae.build(mean_stdev)

#### train
trainer = tra.Trainer(optimizer=optimizer,
                      beta=params.beta,
                      patience=5,
                      min_delta=0.001,
                      max_lr_decay=4,
                      lambda_reg=params.lambda_reg)
model, losses_reco, losses_valid = trainer.train(model=model,
                                                 loss_fn=loss_fn,
                                                 train_ds=train_ds,
                                                 valid_ds=valid_ds,
                                                 epochs=150)
trainer.plot_training_results(losses_reco, losses_valid, 'fig/test')

#### show results
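# Save a few original validation images for visual inspection.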
for i in range(3):
    img = next(valid_ds.as_numpy_iterator())
    plt.imshow(np.squeeze(img[0]), cmap='gray')
    plt.savefig('fig/test/orig' + str(i) + '.png')
    plt.clf()
Example #7
def trainer():
    """Returns a trained corpus"""
    return trn.Trainer("lib/config/korr_main.json")
Example #8
def main():
    parser = argparse.ArgumentParser(description='Train on RadioML 2016 data.')
    parser.add_argument('-d',
                        '--debug',
                        action='store_const',
                        const=logging.DEBUG,
                        dest='loglevel',
                        default=logging.WARNING,
                        help='print debugging information')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_const',
                        const=logging.INFO,
                        dest='loglevel',
                        help='be verbose')
    parser.add_argument('--seed',
                        type=int,
                        default=2016,
                        help='set random seed')
    parser.add_argument('--batch-size',
                        type=int,
                        default=1024,
                        help='batch size')
    parser.add_argument('--train',
                        action='store_true',
                        default=False,
                        help='train model')
    parser.add_argument('--predict',
                        action='store_true',
                        default=False,
                        help='use model for prediction')
    parser.add_argument('--model',
                        default='vtcnn2',
                        choices=[
                            'vtcnn2', 'resnet18-outer', 'resnet18-gasf',
                            'resnet18-gadf', 'resnet18-mtf', 'resnet18-rplot',
                            'resnet18-noisy-outer'
                        ],
                        help='model to use')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.5,
                        help="Choose a dropout rate")
    parser.add_argument('input', help='RadioML data in standard HDF5 format')

    # Parse arguments:
    args = parser.parse_args()
    # Set up logging
    logging.basicConfig(
        format='%(asctime)s:%(name)s:%(levelname)s:%(message)s',
        level=args.loglevel)

    # Allow memory growth on GPU
    physical_devices = tf.config.list_physical_devices('GPU')
    logging.info(physical_devices)
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    except (IndexError, ValueError, RuntimeError):
        # No GPU present, invalid device, or virtual devices already initialized.
        pass

    # Set random seeds
    training.set_seed(args.seed)

    # Load RadioML 2016 data
    classes, data = radioml.load_numpy(args.input)

    dataset_name = os.path.splitext(os.path.basename(args.input))[0]

    input_shape = data['iq_data'].shape[1:]

    # Create model
    trainer = training.Trainer('models', dataset_name, args.model, args.seed)
    model, preprocess = get_model(args.model, input_shape, len(classes), args)
    model.run_eagerly = True
    trainer.model = model

    if args.predict:
        trainer.load_weights()

    trainer.compile()
    trainer.model.summary(print_fn=logging.info)

    # Split into training, validate, and test sets
    train, validate, test = training.split_training(data, 0.5, 0.5)

    train_dataset = to_tfdata(classes, train)
    validate_dataset = to_tfdata(classes, validate)
    test_dataset = to_tfdata(classes, test)

    # Map preprocessing function over data
    def f(x, lbl):
        return (preprocess(x), lbl)

    train_dataset = train_dataset.map(f).batch(args.batch_size).prefetch(1)
    validate_dataset = validate_dataset.map(f).batch(
        args.batch_size).prefetch(1)
    test_dataset = test_dataset.map(f).batch(args.batch_size).prefetch(1)
    if args.train:
        trainer.fit(train_dataset, validation_data=validate_dataset)
    if args.predict:
        predictions = trainer.model.predict(validate_dataset, verbose=2)
        trainer.save_predictions(predictions)

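    # Report the final score (loss and compiled metrics) on the validation split.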
    score = trainer.model.evaluate(validate_dataset, verbose=0)
    print(score)
Example #9
def main():

    start_time = time.time()

    config = setup(args)

    torch.manual_seed(config['seed'])

    device = torch.device('cuda' if (
        torch.cuda.is_available() and config['gpu'] != '-1') else 'cpu')

    # Create or load model
    # Fail early if something is amiss
    logger.info("Creating model ...")
    my_model = model.DCN_Detector(opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(my_model.parameters(), opt.lr)
    start_epoch = 0
    if args.load_model:
        my_model, optimizer, start_epoch = utils.load_model(
            my_model, args.load_model, optimizer, args.resume, args.lr,
            args.lr_step)
    my_model.to(device)

    # Initialize data generators
    logger.info("Loading and preprocessing data ...")
    my_dataset = dataset.ObjDetDataset  # dataset class, instantiated per split below

    val_loader = torch.utils.data.DataLoader(my_dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    trainer = training.Trainer(opt, my_model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.test:  # Only evaluate and skip training
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(my_dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    logger.info('Starting training...')
    best = 1e10
    metrics = []  # list of lists: per-epoch metrics such as accuracy, DICE, precision, and recall
    it = tqdm(range(start_epoch + 1, opt.num_epochs + 1), desc='Training', ncols=0)
    for epoch in it:
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.info('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.info('{} {:8f} | '.format(k, v))
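        # Periodically checkpoint and validate; keep the checkpoint that is best
        # under the configured metric.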
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, my_model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           my_model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       my_model, optimizer)
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, my_model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        metrics.append([epoch, accuracy, dice, precision, recall])

        it.set_postfix(Accuracy='{:.3f}%'.format(accuracy * 100),
                       Max_Accuracy='{:.3f}%'.format(max_acc * 100),
                       Dice='{:.3f}%'.format(dice * 100),
                       Max_Dice='{:.3f}%'.format(max_dice * 100),
                       Precision='{:.3f}%'.format(precision * 100),
                       Max_Precision='{:.3f}%'.format(max_prec * 100),
                       Recall='{:.3f}%'.format(recall * 100),
                       Max_Recall='{:.3f}%'.format(max_rec * 100),
                       Last_pruning='{:2d}'.format(latest_pruning),
                       Pruned='{:.2f}%'.format(pruned_ratio * 100))

    # Export evolution of metrics over epoch
    header = [
        "Epoch", "Accuracy", "DICE", "Precision", "Recall", "Pruned ratio"
    ]
    metrics_filepath = os.path.join(
        config["output_dir"], "metrics_" + config["experiment_name"] + ".xls")
    book = utils.export_performance_metrics(metrics_filepath,
                                            metrics,
                                            header,
                                            sheet_name="metrics")

    # Export best metrics per pruning level
    header = ["Pruned ratio", "DICE", "Accuracy", "Precision", "Recall"]
    utils.export_performance_metrics(metrics_filepath,
                                     best_at_prune_level,
                                     header,
                                     book,
                                     sheet_name="best_metrics")

    # Export record metrics to a file accumulating records from all experiments
    utils.register_record(config["records_file"], config["initial_timestamp"],
                          config["experiment_name"], metrics)

    logger.info('All Done!')

    total_runtime = time.time() - start_time
    logger.info("Total runtime: {} hours, {} minutes, {} seconds\n".format(
        total_runtime // 3600, (total_runtime // 60) % 60, total_runtime % 60))
Example #10
    if args.targets != '':
        targets = args.targets.split(',')

    dataset_dir = cfg.PREPROCESSED_DATA_SETS_DIR


    # This script expects the SKT-style preprocessed dataset.
    assert cfg.DATA_SETS_TYPE == 'SKT'
    if cfg.OBJ_TYPE == 'car':
        train_car = 'object3d_all'
        train_data = 'all_val'
        train_dataset = {train_car: [train_data]}

        validation_car = 'object3d_all'
        validation_data = 'all_val'
        validation_dataset = {validation_car: [validation_data]}


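    # Build batch loaders for the training and validation splits.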
    dataset_loader_train = batch_loading(dataset_dir, train_dataset, is_testset=False)
    dataset_loader_validation = batch_loading(dataset_dir, validation_dataset, is_testset=False)


    train = mv3d.Trainer(train_set=dataset_loader_train, validation_set=dataset_loader_validation,
                         pre_trained_weights=weights, train_targets=targets, log_tag=tag,
                         continue_train=args.continue_train, learning_rate=0.001)

    train(max_iter=max_iter)