Example #1
def run(model, model_pre, optimizer, args, train_loader, test_loader):
    # model, optimizer, args = initializer(local_args)
    # train_loader, test_loader = load_dataset(args)
    best = {}
    best_epoch = 0
    for epoch in range(args.start_epoch, args.epochs + 1):
        print('{:3d}: '.format(epoch), end='')
        result = {'epoch': epoch}

        # use adaptive learning rate
        if args.adaptive_lr:
            adaptive_learning_rate(model, optimizer, epoch)
        result.update(train(args, model, model_pre, train_loader, optimizer))

        # validate and keep history at each log interval
        if epoch % args.log_interval == 0:
            result.update(validate(args, model, test_loader))
            save_history(args, result)
            if not best or result['val_loss'] < best['val_loss']:
                best = result
                best_epoch = epoch

        # save model parameters
        if not args.no_save_model:
            save_model(args, model, epoch)

    # print the best validation result
    print(
        '\nThe best avg val_loss: {:.4f}, avg val_cost: {:.2f}%, avg val_acc: {:.2f}%\n'
        .format(best['val_loss'], best['cost'], best['acc']))

    # save the model giving the best validation results as a final model
    if not args.no_save_model:
        save_model(args, model, best_epoch, True)
    plot_history(args)
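
None of these examples define save_history; its signature clearly varies from project to project. For this first shape (args plus a per-epoch result dict), a minimal sketch, assuming it appends one CSV row per logged epoch (args.history_file and the column ordering are hypothetical):

import csv
import os

def save_history(args, result):
    # append one row per logged epoch; write the header on first use
    write_header = not os.path.exists(args.history_file)
    with open(args.history_file, 'a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=sorted(result))
        if write_header:
            writer.writeheader()
        writer.writerow(result)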
Example #2
def main(scenarios_file, device, epochs, patience, verbose, hyper_params_file):
    hps = get_hyperparameter_options(hyper_params_file)[0]
    batch_size = hps['batch_size']
    limit_vectors = hps['limit_vectors']

    dlf = MVSDataLoaderFactory(batch_size=batch_size, limit_vectors=limit_vectors)
    scenarios = load_scenarios(scenarios_file)
    print('training on', device)

    # dummy model, used only to save the spec
    dmodel = HS_Model(vector_size=vector_size, device=device, patience=patience, **hps)
    save_config(dmodel, hps, results_config_file)

    for scenario in scenarios:
        reset_all_seeds(RANDOM_SEED)
        print('\nTraining scenario:', scenario)
        print('Hyperparameters:', hps)

        train_loader, dev_loader, test_loader = dlf.data_loaders_from_scenario(scenario)
        model = HS_Model(vector_size=vector_size, device=device, patience=patience,
            save_best=True, scenario=scenario, model_path=model_path, **hps)
        
        model.train(train_loader, dev_loader, epochs=epochs, verbose=verbose)
        best_model = load_model(model_path, scenario)
        save_summary(results_file, scenario, model, best_model, train_loader, dev_loader, test_loader, verbose=1)
        save_history(history_path, scenario, model)
        print('Finished training scenario:', scenario)
Example #3
def train_canterpillar_with_generator(name):
    model = canterpillar_net()
    model.summary()
    optimiser = sgd(momentum=0.9, nesterov=True)

    model.compile(optimizer=optimiser, loss=mean_squared_error)

    x_train, x_test = prepare_data_for_canterpillar(segment_len=None)
    batch_size = 20
    steps_per_epoch = 15
    print("батчей за эпоху будет:" + str(steps_per_epoch))
    print("в одном батче " + str(batch_size) + " кардиограмм.")
    train_generator = ecg_batches_generator(segment_len=ecg_segment_len,
                                            batch_size=batch_size,
                                            ecg_dataset=x_train)
    test_generator = ecg_batches_generator(segment_len=ecg_segment_len,
                                           batch_size=batch_size,
                                           ecg_dataset=x_test)

    tb_callback = TensorBoard(log_dir='./caterpillar_logs',
                              histogram_freq=5,
                              write_graph=True,
                              write_grads=True)
    # draw one fixed validation batch: the TensorBoard histograms require
    # array validation data, not a generator
    val_batch = next(test_generator)

    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=50,
                                  validation_data=val_batch,
                                  callbacks=[tb_callback])

    save_history(history, name)
    model.save(name + '.h5')
    return model
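
ecg_batches_generator is not shown either. A minimal sketch of what fit_generator expects here, assuming the generator endlessly yields (x, x) pairs of random fixed-length ECG segments with identity targets (consistent with the mean_squared_error reconstruction loss above; the sampling scheme is an assumption):

import numpy as np

def ecg_batches_generator(segment_len, batch_size, ecg_dataset):
    # loop forever, as Keras generators must
    while True:
        batch = []
        for _ in range(batch_size):
            ecg = ecg_dataset[np.random.randint(len(ecg_dataset))]
            start = np.random.randint(0, len(ecg) - segment_len + 1)
            batch.append(ecg[start:start + segment_len])
        x = np.asarray(batch)[..., np.newaxis]  # (batch, segment_len, 1)
        yield x, x  # autoencoder-style: the input is its own target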
Example #4
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer,
                       loss_fn, epochs):

    best_val_MSE = float('inf')

    for epoch in range(epochs):
        # Run one epoch
        logging.info("Epoch {}/{}".format(epoch + 1, epochs))
        for param_group in optimizer.param_groups:
            print(param_group['lr'])
        # compute number of batches in one epoch (one full pass over the training set)
        train(model, optimizer, loss_fn, train_dataloader)

        # Evaluate MSE for one epoch on train and validation set
        train_MSE = evaluate(results_dir, model, nn.MSELoss(),
                             train_dataloader, device, dtype)
        val_MSE = evaluate(results_dir, model, nn.MSELoss(), val_dataloader,
                           device, dtype)
        #test_MSE = evaluate(results_dir, model, nn.MSELoss(), test_dataloader, device, dtype)
        # Evaluate L1 for one epoch on train and validation set
        train_L1 = evaluate(results_dir, model, nn.L1Loss(), train_dataloader,
                            device, dtype)
        val_L1 = evaluate(results_dir, model, nn.L1Loss(), val_dataloader,
                          device, dtype)

        scheduler.step(train_MSE)

        # save training history in a csv file (val_MSE is passed twice,
        # standing in for the commented-out test_MSE):
        utils.save_history(epoch, train_MSE, val_MSE, val_MSE, train_L1,
                           val_L1, results_dir)

        # print losses
        logging.info("- Train average RMSE loss: " + str(np.sqrt(train_MSE)))
        logging.info("- Validation average RMSE loss: " +
                     str(np.sqrt(val_MSE)))
        #logging.info("- Test average RMSE loss: " + str(np.sqrt(test_MSE)))

        # save MSE if is the best
        is_best = val_MSE <= best_val_MSE
        # If best_eval, best_save_path
        if is_best:
            logging.info("- Found new best evaluation loss")
            # Save best val loss in a txt file in the checkpoint directory
            best_val_path = "best_val_loss.txt"
            utils.save_dict_to_txt(val_MSE, results_dir, best_val_path, epoch)
            best_val_MSE = val_MSE

        # Save latest val metrics in a json file in the results directory
        last_val_path = "last_val_loss.txt"
        utils.save_dict_to_txt(val_MSE, results_dir, last_val_path, epoch)

        # Save weights
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optim_dict': optimizer.state_dict()
            },
            is_best=is_best,
            checkpoint=checkpoint_dir)
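
utils.save_checkpoint is used here in the common PyTorch "last/best" pattern. A sketch under that assumption (the file names last.pth.tar and best.pth.tar are conventional guesses):

import os
import shutil
import torch

def save_checkpoint(state, is_best, checkpoint):
    # always save the latest state; copy it aside when validation improves
    os.makedirs(checkpoint, exist_ok=True)
    last_path = os.path.join(checkpoint, 'last.pth.tar')
    torch.save(state, last_path)
    if is_best:
        shutil.copyfile(last_path, os.path.join(checkpoint, 'best.pth.tar'))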
Example #5
def search_dropout(dropout_rates=(.2, .3, .4, .5)):
    params = utils.Params('experiments/crop_64/params.json')
    for dr in dropout_rates:
        print(f'dropout_rate={dr}')
        params.dropout = dr
        trainer = Trainer(params=params)
        history = trainer.train()
        utils.save_history(history, trainer, param_name='dropout')
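
The utils.save_history shared by this and the later tune_* helpers is also undefined. A plausible sketch, assuming it dumps the Keras history dict to a JSON file named after the swept parameter (trainer.params, trainer.experiment_dir, and the file layout are all assumptions):

import json
import os

def save_history(history, trainer, param_name=None, name_modifier=''):
    # encode the swept parameter and its current value in the file name
    if param_name:
        tag = '{}_{}'.format(param_name, getattr(trainer.params, param_name))
    else:
        tag = 'run'
    path = os.path.join(str(trainer.experiment_dir),
                        'history_{}{}.json'.format(tag, name_modifier))
    with open(path, 'w') as f:
        json.dump({k: [float(v) for v in vs]
                   for k, vs in history.history.items()}, f, indent=2)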
Example #6
def train_model(modelBuilder):
    train_df = load_dataframe('train')
    test_df = load_dataframe('test')

    X_train = process(transform_dataset(train_df), isolate)
    X_test = process(transform_dataset(test_df), isolate)

    target_train = train_df['is_iceberg']
    X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(
        X_train, target_train, random_state=1, train_size=0.75)

    model = modelBuilder()
    optimizer = Adam(lr=LEARNING_RATE,
                     beta_1=BETA_1,
                     beta_2=BETA_2,
                     epsilon=EPSILON,
                     decay=DECAY)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()

    callbacks = build_save_callbacks(filepath=MODEL_PATH, patience=5)

    datagen = ImageDataGenerator(
        #         featurewise_center=True,
        #         featurewise_std_normalization=True,
        #         rotation_range=20,
        #         width_shift_range=0.2,
        #         height_shift_range=0.2,
        #         horizontal_flip=True
    )
    datagen.fit(X_train)

    empty = ImageDataGenerator()
    empty.fit(X_valid)

    steps_per_epoch = len(X_train_cv) // BATCH_SIZE
    hist = model.fit_generator(datagen.flow(X_train_cv,
                                            y_train_cv,
                                            batch_size=BATCH_SIZE),
                               epochs=EPOCHS,
                               verbose=VERBOSE,
                               validation_data=empty.flow(X_valid, y_valid),
                               steps_per_epoch=steps_per_epoch,
                               callbacks=callbacks)

    model.load_weights(filepath=MODEL_PATH)
    score = model.evaluate(X_valid, y_valid, verbose=1)
    print('Validation loss:', score[0])
    print('Validation accuracy:', score[1])

    predicted_test = model.predict_proba(X_test)

    save_submission(test_df, predicted_test, filename='sub.csv')
    save_history(hist.history, model_name=MODEL_NAME)
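
build_save_callbacks is undefined here; given that the code later reloads the best weights from MODEL_PATH, a reasonable sketch is checkpointing plus early stopping (the monitored metric is an assumption):

from keras.callbacks import EarlyStopping, ModelCheckpoint

def build_save_callbacks(filepath, patience):
    # keep only the best weights; stop once val_loss stalls
    return [
        ModelCheckpoint(filepath=filepath, monitor='val_loss',
                        save_best_only=True, save_weights_only=True),
        EarlyStopping(monitor='val_loss', patience=patience),
    ]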
Example #7
def check_update() -> List[News]:
    history = load_history()

    using_crawlers = filter(lambda c: c.SITE_NAME in TOKEN_TABLE.keys(),
                            get_all_crawler_classes())
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        params = [(clazz, history) for clazz in using_crawlers]
        result = executor.map(crawl_news_with_class, params)

    save_history(history)
    return sum(result, [])  # flatten the per-crawler lists into a single List[News]
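
Because executor.map passes exactly one argument per call, crawl_news_with_class must unpack the (crawler class, history) tuple itself. A hypothetical sketch (the crawl method and its return type are assumptions):

def crawl_news_with_class(args):
    # args is one (crawler class, shared history) tuple from the params list
    clazz, history = args
    crawler = clazz()
    return crawler.crawl(history)  # assumed to return a List[News]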
Example #8
def train_fish(fish, name):
    x = get_healthy_dataset()
    train_generator, test_generator = make_generators(x)
    history = fish.fit_generator(generator=train_generator,
                                 steps_per_epoch=20,
                                 epochs=50,
                                 validation_data=test_generator,
                                 validation_steps=1)

    save_history(history, name)
    fish.save(name + '.h5')
Example #9
    def tune_kernel_initializer(self, kernel_initializers=('glorot_uniform', 'he_uniform', 'he_normal')):
        for kernel_initializer in kernel_initializers:
            print(f'============== kernel_initializer: {kernel_initializer} ==============')
            self.params.kernel_initializer = kernel_initializer
            self.trainer = Trainer(params=self.params,
                                   net_class=self.net_class,
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='kernel_initializer')
Example #10
    def tune_image_shape(self, input_shapes=((512, 512, 3), (256, 256, 3))):
        for input_shape in input_shapes:
            print(f'============== input_shape: {input_shape} ==============')
            self.params.input_shape = input_shape
            self.trainer = Trainer(params=self.params,
                                   net_class=self.net_class,
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='input_shape')
Example #11
    def tune_leaky_relu(self, alphas=(0, .001, .01, .1, .2, .3), name_modifier=''):
        for alpha in alphas:
            print(f'============== alpha: {alpha} ==============')
            self.params.alpha = alpha
            self.trainer = Trainer(params=self.params,
                                   net_class=self.net_class,
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='alpha', name_modifier=name_modifier)
Example #12
    def tune_batch_size(self, batch_sizes=(2, 4, 8, 16, 32)):
        for bs in batch_sizes:
            print(f'============== bs: {bs} ==============')
            self.params.batch_size = bs
            self.trainer = Trainer(params=self.params,
                                   net_class=self.net_class,
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='batch_size')
Example #13
def search_crop(crop_sizes=(64, 128, 256)):
    experiment_dir = Path('experiments/augmentation')

    for crop_size in crop_sizes:
        print(f'crop_size={crop_size}')
        params = utils.Params(experiment_dir / 'params.json')
        params.input_shape = [crop_size, crop_size, 1]
        params.crop_size = crop_size

        trainer = Trainer(params=params)
        history = trainer.train()
        utils.save_history(history, trainer, param_name='crop_size')
Example #14
    def tune_lr(self, rates=(1e-3, 1e-4, 1e-5, 1e-6)):
        for lr in rates:
            print(f'============== lr: {lr} ==============')

            self.params.learning_rate = lr
            self.trainer = Trainer(params=self.params,
                                   net_class=self.net_class,
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='learning_rate')
Example #15
def train(name):
    model = get_model()
    generator_train, generator_test = get_generators(train_batch=15,
                                                     test_batch=50)
    history = model.fit_generator(generator=generator_train,
                                  steps_per_epoch=40,
                                  epochs=10,
                                  validation_data=generator_test,
                                  validation_steps=1)

    save_history(history, name)
    model.save(name + '.h5')
Example #16
def train_batterfly(name):
    x_train, x_test, y_train, y_test = prepare_data(
        seg_len=None)  # pull out the full, uncut dataset
    num_labels = y_train.shape[1]
    model = create_batterfly(num_labels=num_labels)
    model.summary()
    batch_size = 30

    train_generator = ecg_batches_generator_for_classifier(
        segment_len=ecg_segment_len,
        batch_size=batch_size,
        ecg_dataset=x_train,
        diagnodses=y_train)
    test_generator = ecg_batches_generator_for_classifier(
        segment_len=ecg_segment_len,
        batch_size=300,
        ecg_dataset=x_test,
        diagnodses=y_test)
    steps_per_epoch = 40
    print("батчей за эпоху будет:" + str(steps_per_epoch))
    print("в одном батче " + str(batch_size) + " кардиограмм.")

    # automatically reduce the learning rate on plateaus
    #learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 5, verbose = 1)

    # SGDR-style learning rate schedule
    #change_lr = cosine_lr.SGDRScheduler(min_lr=0.0001, max_lr=0.1, steps_per_epoch=np.ceil(15/batch_size), lr_decay=0.8, cycle_length=1, mult_factor=1)

    #tb_callback = TensorBoard(log_dir='./butterfly_logs', histogram_freq=20, write_graph=True, write_grads=True)
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=50,
                                  validation_data=test_generator,
                                  validation_steps=1)

    save_history(history, name)
    model.save(name + '.h5')

    eval_generator = ecg_batches_generator_for_classifier(
        segment_len=ecg_segment_len,
        batch_size=700,
        ecg_dataset=x_test,
        diagnodses=y_test)
    xy = next(eval_generator)

    # have the model predict on one batch
    prediction = model.predict_on_batch(x=xy[0])
    print("model output:")
    print(prediction)
    print("ground truth:")
    print(xy[1])
    return xy[1], prediction
Example #17
    def train(self, load_weights=False):

        if load_weights:
            self.model.load_weights(self.weight_file)

        history = self.model.fit_generator(generator=self.train_gen,
                                           steps_per_epoch=self.params.num_train//self.params.batch_size,
                                           epochs=self.params.epochs,
                                           validation_data=self.val_gen,
                                           validation_steps=self.params.num_val//self.params.batch_size,
                                           callbacks=self.callbacks)
        utils.save_history(history, self)
        return history
Example #18
def main():
    """Main function of the program.

    The function loads the dataset and calls training and validation functions.
    """
    model, optimizer, args = initializer()
    train_loader, test_loader, exit_tags = utils.load_dataset(args)

    # disable training
    if args.testing:
        result = validate(args, model, test_loader)
        #print('\nThe avg val_loss: {:.4f}, avg val_cost: {:.2f}%, avg val_acc: {:.2f}%\n'
        #      .format(result['val_loss'], result['cost'], result['acc']))
        #examine(args, model, test_loader)
        return

    if args.two_stage:
        args.loss_func = "v0"

    for epoch in range(args.start_epoch, args.epochs + 1):
        print('{:3d}:'.format(epoch), end='')

        # two-stage training switches to loss version v1 after the first 25 epochs
        if args.two_stage and epoch > 25:
            args.loss_func = "v1"

        # use adaptive learning rate
        if args.adaptive_lr:
            utils.adaptive_learning_rate(args, optimizer, epoch)

        result = {'epoch': epoch}
        result.update(train(args, model, train_loader, optimizer, exit_tags))

        # validate and keep history at each log interval
        if epoch % args.log_interval == 0:
            result.update(validate(args, model, test_loader))
            utils.save_history(args, result)

        # save model parameters
        if not args.no_save_model:
            utils.save_model(args, model, epoch)

    # print the best validation result
    best_epoch = utils.close_history(args)

    # save the model giving the best validation results as a final model
    if not args.no_save_model:
        utils.save_model(args, model, best_epoch, True)

    utils.plot_history(args)
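
utils.close_history is not shown. A sketch consistent with the save_history sketch above, assuming it re-reads the history CSV and returns the epoch with the lowest val_loss (args.history_file is again hypothetical):

import csv

def close_history(args):
    # pick the logged epoch with the lowest validation loss
    with open(args.history_file) as f:
        rows = list(csv.DictReader(f))
    best = min(rows, key=lambda r: float(r['val_loss']))
    print('\nBest epoch: {} (val_loss {})'.format(best['epoch'], best['val_loss']))
    return int(best['epoch'])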
Example #19
def train_func(args, log_dir):
    model, model_dir, (trainx, trainy, testx, testy) = build_model(args)

    # Callback_1
    history_callback = Batch_History()

    # Callback_2
    state_file = os.path.join(model_dir, 'state.json')
    #state_file = "/home/plash/petpen/state.json"
    state_callback = Model_state(state_file, model.config)
    history = model.train(callbacks=[history_callback, state_callback])
    save_history(os.path.join(log_dir, 'train_log'), history, history_callback)
    model_result_path = os.path.dirname(log_dir)
    model.save(os.path.join(model_result_path, 'weights.h5'))
Example #20
    def tune_model_size(self, model_sizes=('regular', 'big')):

        net_classes = {'regular': FullUnet,
                       'big': BiggerLeakyUnet}

        for model_size in model_sizes:
            print(f'============== model_size: {model_size} ==============')
            self.params.model_size = model_size
            self.trainer = Trainer(params=self.params,
                                   net_class=net_classes[model_size],
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='model_size')
Example #21
def train_basic_model(args, path_configs: PathConfigs,
                      train_configs: SimpleTrainingConfigs, train_images,
                      train_labels, test_images, test_labels):
    logging.info('Training basic network...')

    model_name = args.model
    model = utils.create_model(model_name)

    model.build(input_shape=(train_configs.batch_size, IMAGE_HEIGHT,
                             IMAGE_WIDTH, IMAGE_CHANNELS))
    model.summary()

    opt = SGD(lr=train_configs.learning_rate, momentum=train_configs.momentum)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if args.use_augmentation:
        model_name += '_augment'
        data_gen = ImageDataGenerator(rotation_range=10,
                                      zoom_range=0.1,
                                      width_shift_range=0.1,
                                      height_shift_range=0.1,
                                      horizontal_flip=True)
        it_train = data_gen.flow(train_images,
                                 train_labels,
                                 batch_size=train_configs.batch_size)
        steps = int(train_images.shape[0] / train_configs.batch_size)
        history = model.fit_generator(it_train,
                                      steps_per_epoch=steps,
                                      epochs=train_configs.epochs,
                                      validation_data=(test_images,
                                                       test_labels),
                                      verbose=2)

    else:
        history = model.fit(train_images,
                            train_labels,
                            epochs=train_configs.epochs,
                            batch_size=train_configs.batch_size,
                            validation_data=(test_images, test_labels),
                            verbose=2)

    model.save_weights(path_configs.model_weights_folder_name + '/' +
                       model_name)
    utils.save_history(
        history.history, path_configs.training_histories_folder_name + '/' +
        model_name + '_history.json')
Example #22
def train(name, need_permute=False):
    model = get_model()
    if need_permute:
        generator_train, generator_test = get_generators_permute(
            train_batch=15, test_batch=50)
    else:
        generator_train, generator_test = get_generators(train_batch=15,
                                                         test_batch=50)
    history = model.fit_generator(generator=generator_train,
                                  steps_per_epoch=40,
                                  epochs=45,
                                  validation_data=generator_test,
                                  validation_steps=1)

    save_history(history, name)
    model.save(name + '.h5')
Example #23
def train(model, training_data, validation_data, optimizer, device, opt):
    ''' Start training '''

    log_train_file = None
    log_valid_file = None

    if opt.log:
        log_train_file = opt.log + '.train.log'
        log_valid_file = opt.log + '.valid.log'

        print('[Info] Training performance will be written to file: {} and {}'.format(
            log_train_file, log_valid_file))

        with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
            log_tf.write('epoch,loss,ppl,accuracy\n')
            log_vf.write('epoch,loss,ppl,accuracy\n')

    history = []
    valid_accus = []
    for e in range(opt.epoch):

        train_loss, train_accu = train_epoch(
            model, training_data, optimizer, device, smoothing=opt.label_smoothing)

        valid_loss, valid_accu = eval_epoch(model, validation_data, device)

        history.append([train_loss, valid_loss, valid_accu])
        valid_accus += [valid_accu]

        if valid_accu >= max(valid_accus):
            save_model(model, opt.result_dir)
            print('[Info] The checkpoint file has been updated.')

        if log_train_file and log_valid_file:
            with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=e, loss=train_loss,
                    ppl=math.exp(min(train_loss, 100)), accu=100*train_accu))
                log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=e, loss=valid_loss,
                    ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu))

        show_progress(e+1, opt.epoch, train_loss, valid_loss, valid_accu)

    save_history(history, opt.result_dir)
Example #24
    def train(self, X, y, h1=1000, h2=600):
        trn_acc = []
        val_acc = []
        trn_loss = []
        val_loss = []
        ae1, W1 = self._init_autoencoder(X, 0, h1, X.shape[1])
        ae2, W2 = self._init_autoencoder(ae1, 1, h2, h1)
        for trn_i, val_i in StratifiedKFold(n_splits=10, shuffle=True).split(X, y):
            trn_x, trn_y = X[trn_i], y[trn_i]
            val_x, val_y = X[val_i], y[val_i]
            history = self._init_neural_net(trn_x, trn_y, val_x, val_y, h1, h2, W1, W2)
            trn_acc.append(history.history['acc'])
            val_acc.append(history.history['val_acc'])
            trn_loss.append(history.history['loss'])
            val_loss.append(history.history['val_loss'])
            del history
            gc.collect()
        save_history(trn_acc, val_acc, trn_loss, val_loss, 'heinsfeld_autoencoder_training')
Example #25
    def tune_batch_norm(self, normalizations=('no-batch-norm', 'batch-norm'), name_modifier=''):

        net_classes = {'no-batch-norm': BiggerLeakyUnet,
                       'batch-norm': BiggerLeakyBNUnet}

        for normalization in normalizations:
            print(f'============== normalization: {normalization} ==============')
            self.params.normalization = normalization
            if normalization == 'batch-norm':
                self.params.learning_rate = .1
            else:
                self.params.learning_rate = 1e-5
            self.trainer = Trainer(params=self.params,
                                   net_class=net_classes[normalization],
                                   experiment_dir=self.experiment_dir,
                                   is_toy=self.is_toy,
                                   set_seed=self.set_seed)
            history = self.trainer.train()
            utils.save_history(history, self.trainer, param_name='normalization', name_modifier=name_modifier)
Example #26
def train_func(args):
    model, model_dir, dataset_dir = build_model(args)
    model_file = os.path.join(model_dir, 'result.json')
    dataset_file = os.path.join(dataset_dir, 'train.csv')
    model.load_dataset(model_file, dataset_file)

    # Callback_1
    history_callback = Batch_History()
    str_start_time = datetime.now().strftime('%y%m%d_%H%M%S')
    model_result_path = os.path.join(model_dir, str_start_time)
    os.mkdir(model_result_path)
    trainlog_dir = os.path.join(model_result_path, 'logs')
    os.mkdir(trainlog_dir)

    # Callback_2
    state_file = os.path.join('.', 'state.json')
    state_callback = Model_state(state_file, model.config)
    history = model.train(callbacks=[history_callback, state_callback])
    save_history(os.path.join(trainlog_dir, 'train_log'), history,
                 history_callback)
    model.save_weights(os.path.join(model_result_path, 'weights.h5'))
Example #27
def train_func(args, log_dir):
    job_id = args.id  # avoid shadowing the id() builtin
    change_status('loading', job_id)
    model, model_dir, (trainx, trainy, testx, testy) = build_model(args)

    # Callback_1
    history_callback = Batch_History()

    # Callback_2
    state_file = os.path.join(model_dir, 'state.json')
    #state_file = "/home/plash/petpen/state.json"
    state_callback = Model_state(state_file, model.config)

    # Callback_3
    rl_callback = RealtimeLogger(os.path.join(log_dir, 'realtime_logging.txt'))

    change_status('running', job_id)
    history = model.train(
        callbacks=[history_callback, state_callback, rl_callback])
    save_history(os.path.join(log_dir, 'train_log'), history, history_callback)
    model_result_path = os.path.dirname(log_dir)
    model.save(os.path.join(model_result_path, 'weights.h5'))
Example #28
def train_butterfly_binary(name):
    model = create_batterfly(num_labels=1)
    model.summary()
    optimiser = sgd(momentum=0.9, nesterov=True)

    model.compile(optimizer=optimiser,
                  loss=mean_squared_error,
                  metrics=['accuracy'])

    train_generator, test_generator = get_generators(
        train_batch=20, test_batch=50, segment_len=ecg_segment_len)
    steps_per_epoch = 30

    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=50,
                                  validation_data=test_generator,
                                  validation_steps=2)

    save_history(history, name)
    model.save(name + '.h5')

    return model
Example #29
        z = Variable(z, volatile=True)
    random_image = G(z)
    fixed_image = G(fixed_z)
    G.train()  # switch the generator back to training mode after sampling

    p = DIR + '/Random_results/MNIST_GAN_' + str(epoch + 1) + '.png'
    fixed_p = DIR + '/Fixed_results/MNIST_GAN_' + str(epoch + 1) + '.png'
    utils.save_result(random_image, (epoch+1), save=True, path=p)
    utils.save_result(fixed_image, (epoch+1), save=True, path=fixed_p)
    train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
    train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))
    train_hist['per_epoch_times'].append(per_epoch_time)

end_time = time()
total_time = end_time - start_time  # start_time is assumed to be recorded before the training loop
print("Avg per epoch time: %.2f, total %d epochs time: %.2f" % (torch.mean(torch.FloatTensor(train_hist['per_epoch_times'])), EPOCH, total_time))
print("Training finished.")

# save parameters
torch.save(G.state_dict(), DIR + "/generator_param.pkl")
torch.save(D.state_dict(), DIR + "/discriminator_param.pkl")

# save history
p = DIR + '/history.png'
utils.save_history(train_hist, save=True, path=p)

# save animation
prefix = DIR + '/Fixed_results/MNIST_GAN_'
p = DIR + '/animation.gif'
utils.save_animation(EPOCH, prefix=prefix, path=p)
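
utils.save_animation presumably stitches the per-epoch fixed-sample PNGs into a GIF. A minimal sketch using imageio (the library choice and frame rate are assumptions):

import imageio

def save_animation(num_epochs, prefix, path):
    # read MNIST_GAN_1.png .. MNIST_GAN_<num_epochs>.png and write one GIF
    frames = [imageio.imread('{}{}.png'.format(prefix, e + 1))
              for e in range(num_epochs)]
    imageio.mimsave(path, frames, fps=5)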
Example #30
            class_mode='binary')
        print(train_generator.class_indices)

        # training
        history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=int(np.floor(2000 / 32)),
                                      epochs=50,
                                      validation_data=validation_generator,
                                      validation_steps=int(np.floor(800 / 32)))
        utils.plot_history(history)

        # save the results
        model.save(os.path.join(config.result_dir, 'scratch_model.h5'))
        model.save_weights(
            os.path.join(config.result_dir, 'scratch_weights.h5'))
        utils.save_history(
            history, os.path.join(config.result_dir, 'scratch_history.txt'))

    except (KeyboardInterrupt, SystemExit):
        utils.unlock()
        utils.error(config.syserr)
    except LunaExcepion as e:
        utils.error(e.value)
        if e.value == config.locked:
            logger.info("------ end ------")
            exit()
    except Exception as e:
        logger.error(e)
        logger.error(traceback.format_exc())
        utils.error(config.syserr)
    utils.unlock()
    logger.info("------ end ------")