Example #1
def train_model(model, X_train, y_train):
    history = model.fit(X_train,
                        y_train,
                        epochs=200,
                        batch_size=200,
                        verbose=1,
                        validation_split=0.2)
    plot_history(history)
    return history
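The `plot_history` helper itself is not part of these snippets; each project defines its own. As a point of reference, a minimal sketch for the common case of a Keras `History` object (assuming matplotlib and the standard `loss`/`val_loss`/`accuracy` keys) could look like this:

import matplotlib.pyplot as plt

def plot_history(history):
    # Plot whichever training/validation curves are present in a Keras History object.
    hist = history.history
    plt.figure()
    for key in ('loss', 'val_loss', 'accuracy', 'val_accuracy'):
        if key in hist:
            plt.plot(hist[key], label=key)
    plt.xlabel('epoch')
    plt.legend()
    plt.show()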
Example #2
def main():
    train_ds, val_ds = build_image_generator()

    model = build_model()
    model.summary()

    history = model.fit(train_ds, validation_data=val_ds, epochs=10)
    model.save('saved_model')

    plot_history(history)
Example #3
    def fit_directory(self,
                      path,
                      batch_size,
                      epochs,
                      val_path=None,
                      save_weights=False):
        train_generator = self._train_generator(path, batch_size)
        if val_path is None:
            validation_generator = None
            validation_steps = None
        else:
            validation_generator = self._test_val_generator(
                val_path, batch_size)
            validation_steps = validation_generator.samples / batch_size

        callbacks = [
            TensorBoard(log_dir='logs/' + self.born_time),
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.45802,
                              patience=10,
                              verbose=1)
        ]
        if save_weights:
            callbacks.append(
                ModelCheckpoint(os.path.join(
                    self.dump_path,
                    'weights_' + time.strftime('%Y%m%d%H%M%S', time.gmtime())),
                                verbose=1,
                                period=20))

        history = self.model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples / batch_size,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=validation_steps,
            callbacks=callbacks)
        utils.plot_history(history,
                           self.dump_path,
                           identifier='e' + str(epochs) + '_b' +
                           str(batch_size))
        with open(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_history.pklz'), 'wb') as f:
            cPickle.dump((history.epoch, history.history, history.params,
                          history.validation_data, self.model.get_config()), f,
                         cPickle.HIGHEST_PROTOCOL)
        if save_weights:
            self.model.save_weights(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_weights.h5'))
        return history
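The history tuple written to the .pklz file above can later be read back. A minimal sketch, assuming Python 3's pickle module and a hypothetical file name (data pickled under Python 2 may additionally need encoding='latin1'):

import pickle

# Hypothetical path; the real file name encodes epochs and batch size, e.g. 'e20_b32_history.pklz'.
with open('dump/e20_b32_history.pklz', 'rb') as f:
    epoch_list, history_dict, params, validation_data, model_config = pickle.load(f)
print(history_dict.keys())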
Example #4
def main(debug: bool = False, eager: bool = False):

    if debug:
        import debugpy

        print("Waiting for debugger...")
        debugpy.listen(5678)
        debugpy.wait_for_client()

    X_train, _1, X_test, _2 = dataget.image.mnist(global_cache=True).get()
    # Now binarize data
    X_train = (X_train > 0).astype(jnp.float32)
    X_test = (X_test > 0).astype(jnp.float32)

    print("X_train:", X_train.shape, X_train.dtype)
    print("X_test:", X_test.shape, X_test.dtype)

    model = elegy.Model(
        module=VariationalAutoEncoder.defer(),
        loss=[KLDivergence(), BinaryCrossEntropy(on="logits")],
        optimizer=optix.adam(1e-3),
        run_eagerly=eager,
    )

    epochs = 10

    # Fit with datasets in memory
    history = model.fit(
        x=X_train,
        epochs=epochs,
        batch_size=64,
        steps_per_epoch=100,
        validation_data=(X_test, ),
        shuffle=True,
    )
    plot_history(history)

    # get random samples
    idxs = np.random.randint(0, len(X_test), size=(5, ))
    x_sample = X_test[idxs]

    # get predictions
    y_pred = model.predict(x=x_sample)

    # plot results
    plt.figure(figsize=(12, 12))
    for i in range(5):
        plt.subplot(2, 5, i + 1)
        plt.imshow(x_sample[i], cmap="gray")
        plt.subplot(2, 5, 5 + i + 1)
        plt.imshow(y_pred["image"][i], cmap="gray")

    plt.show()
Example #5
def main():
    """Main function of the program.

    The function loads the dataset and calls training and validation functions.
    """
    model, optimizer, args = initializer()
    train_loader, test_loader, exit_tags = utils.load_dataset(args)

    # disable training
    if args.testing:
        result = validate(args, model, test_loader)
        #print('\nThe avg val_loss: {:.4f}, avg val_cost: {:.2f}%, avg val_acc: {:.2f}%\n'
        #      .format(result['val_loss'], result['cost'], result['acc']))
        #examine(args, model, test_loader)
        return

    if args.two_stage:
        args.loss_func = "v0"

    for epoch in range(args.start_epoch, args.epochs + 1):
        print('{:3d}:'.format(epoch), end='')

        # two-stage training uses the loss version-1 after training for 25 epochs
        if args.two_stage and epoch > 25:
            args.loss_func = "v1"

        # use adaptive learning rate
        if args.adaptive_lr:
            utils.adaptive_learning_rate(args, optimizer, epoch)

        result = {'epoch': epoch}
        result.update(train(args, model, train_loader, optimizer, exit_tags))

        # validate and keep history at each log interval
        if epoch % args.log_interval == 0:
            result.update(validate(args, model, test_loader))
            utils.save_history(args, result)

        # save model parameters
        if not args.no_save_model:
            utils.save_model(args, model, epoch)

    # print the best validation result
    best_epoch = utils.close_history(args)

    # save the model giving the best validation results as a final model
    if not args.no_save_model:
        utils.save_model(args, model, best_epoch, True)

    utils.plot_history(args)
Example #6
def main():
    fname = str(datetime.now()).replace(':', '_').replace(' ', '_')[5:19]
    create_dirs_write_config(fname, config, 'cnn')

    spectrogram, melfilters = load_mel_spectrogram_db(original_audio_list[config['audio']], config)
    print('Finished loading audio and creating spectrogram, shape: {}\n'.format(spectrogram.shape))
    print('Min/max spectrogram: {}, {}'.format(np.min(spectrogram), np.max(spectrogram)))

    config['start_offset'] += config['use_prev_frames']
    spectrogram = spectrogram.T  # rows should be different training examples, so different points in time
    mm_scaler = MinMaxScaler()

    spectrogram = mm_scaler.fit_transform(spectrogram)
    print('Min/max spectrogram post-scaling: {}, {}'.format(np.min(spectrogram), np.max(spectrogram)))

    X_train = np.zeros((config['n_train'], config['use_prev_frames'], spectrogram.shape[1]))

    for i in range(config['use_prev_frames']):
        X_train[:, i, :] = spectrogram[i:i + config['n_train'], :]

    y_train = spectrogram[config['use_prev_frames']:config['n_train']+config['use_prev_frames'], :]

    cnn = get_model((X_train.shape[1], X_train.shape[2]))

    callbacks = get_callbacks(fname, config)
    history = cnn.fit(X_train, y_train, epochs=config['n_epochs'], batch_size=config['batch_size'],
                      validation_split=0.1,
                      verbose=1, callbacks=callbacks)

    write_keras_model(fname, config, 'cnn', cnn)

    plot_history(fname, config, 'cnn', history)

    output_spectrogram = np.zeros((config['n_test'] + config['use_prev_frames'], spectrogram.shape[1]))

    output_spectrogram[:config['use_prev_frames'], :] = \
        spectrogram[config['test_offset']:config['test_offset'] + config['use_prev_frames'], :]

    print('Running prediction')

    for i in range(config['n_test']):
        cnn_input = output_spectrogram[i:i + config['use_prev_frames'], :].reshape([1, config['use_prev_frames'],
                                                                                    config['n_mel']])
        cnn_output = cnn.predict(cnn_input)
        cnn_output = cnn_output.clip(0., 1.)
        output_spectrogram[config['use_prev_frames'] + i, :] = cnn_output

    convert_output_to_audio(output_spectrogram, config, mm_scaler, melfilters, fname, 'cnn')
Example #7
def main():
    transform_train = torchvision.transforms.Compose([
        torchvision.transforms.RandomCrop(32, padding=4, padding_mode='edge'),
        torchvision.transforms.RandomHorizontalFlip(),
        # torchvision.transforms.RandomRotation(15),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    training_data = torchvision.datasets.CIFAR10('./data/cifar10', train=True, transform=transform_train, download=True)
    testing_data = torchvision.datasets.CIFAR10('./data/cifar10', train=False, transform=transform_test, download=True)

    model = resnet_v2(input_shape, depth, n_classes)
    model.summary()
    model.compile(Adam(lr=0.001), 'categorical_crossentropy', 'accuracy')

    def lr_scheduler(model):
        lr = 1e-3
        if model.n_epoch > 180:
            lr *= 0.5e-3
        elif model.n_epoch > 160:
            lr *= 1e-3
        elif model.n_epoch > 120:
            lr *= 1e-2
        elif model.n_epoch > 80:
            lr *= 1e-1
        model.optimizer.lr = lr
        print('Learning rate: ', lr)
        return lr

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=6,
                                   min_lr=0.5e-6)

    history = model.fit_torchvision(training_data, batch_size=batch_size, epochs=epochs,
                                    validation_data=testing_data, callbacks=[lr_scheduler])

    plot_history(history, 'cifar10_resnet56_v2.jpg')

    model.save('resnet56_v2.h5')
Example #8
def task(param):
    output_path = 'output/'
    if len(sys.argv) > 1:
        output_path = sys.argv[1]
        if len(sys.argv) > 2 and sys.argv[2] == 'plot':
            plot_history(output_path)
            exit(0)
    else:
        if os.path.exists(output_path):
            i = 1
            while os.path.exists(output_path):
                output_path = 'output%d/' % i
                i += 1
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    logging.info('output path: {}'.format(output_path))
    train_model(output_path, param)
Example #9
def plot(update, context):

    args = context.args

    if not args:
        return update.message.reply_text('Please provide ticker as argument')

    ticker = args[0].upper()
    interval = "1h"
    period = "1y"

    # Validate interval arg
    if len(args) == 2:
        interval = args[1]
        if interval not in INTERVALS:
            return update.message.reply_text(
                f'{interval} not a valid interval')
        if interval.endswith('m'):
            period = "1mo"

    # Get historical market data
    stock = yf.Ticker(ticker)
    hist = stock.history(period=period, interval=interval, prepost=True)

    # If ticker is not valid, will appear in _ERRORS dict
    if ticker in yf.shared._ERRORS:
        return update.message.reply_text(f'{yf.shared._ERRORS[ticker]}')

    # Keep last data points
    hist = hist.tail(100)

    # Plot
    plot_history(hist, ticker)
    plt.savefig('image.png', bbox_inches='tight')
    plt.clf()

    # Upload chart to AWS so it's available to anyone
    aws_file = f'{uuid.uuid4()}.png'
    aws_url = f'https://stocks-bot.s3-sa-east-1.amazonaws.com/{aws_file}'
    upload_to_aws('image.png', 'stocks-bot', aws_file)

    # Response
    update.message.reply_photo(aws_url)
Example #10
def train(g_model,
          d_model,
          gan_model,
          dataset,
          latent_dim,
          epochs=20,
          batch=64):
    batch_per_epoch = int(dataset.shape[0] / batch)

    steps = batch_per_epoch * epochs

    half_batch = int(batch / 2)

    d1_hist, d2_hist, g_hist = list(), list(), list()

    for i in range(steps):
        X_real, y_real = generate_real_samples(dataset, half_batch)

        X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)

        # update discriminator model
        d_loss1 = d_model.train_on_batch(X_real, y_real)
        d_loss2 = d_model.train_on_batch(X_fake, y_fake)

        # update generator via discriminator's loss
        z_input = generate_latent_points(latent_dim, batch)
        y_real2 = np.ones((batch, 1))

        g_loss = gan_model.train_on_batch(z_input, y_real2)

        print("{:d}, d1={:.3f}, d2={:.3f}, g={:.3f}".format(
            i + 1, d_loss1, d_loss2, g_loss))

        d1_hist.append(d_loss1)
        d2_hist.append(d_loss2)
        g_hist.append(g_loss)

        if (i + 1) % (batch_per_epoch * 1) == 0:
            summarize_performance(i, g_model, latent_dim)

    plot_history(d1_hist, d2_hist, g_hist)
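Note that in this GAN example plot_history receives three raw loss lists rather than a Keras History object. A minimal sketch of a matching helper, assuming matplotlib and a hypothetical output file name:

import matplotlib.pyplot as plt

def plot_history(d1_hist, d2_hist, g_hist):
    # One curve per loss: discriminator on real, discriminator on fake, generator.
    plt.plot(d1_hist, label='d-real')
    plt.plot(d2_hist, label='d-fake')
    plt.plot(g_hist, label='gen')
    plt.xlabel('step')
    plt.legend()
    plt.savefig('gan_loss_history.png')
    plt.close()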
Example #11
def simulate(id, crypto_amount, assets_jpy, coef, crypto_name, start, end, result_root="simulations/", save=False,
             verbose=False):

    df_historical_price = get_historical_price(crypto_name=crypto_name, start=start, end=end)
    history = run_simulation(crypto_amount, assets_jpy, coef, df_historical_price)

    final_state = history[-1]
    # Count of (buy/sell/nothing)
    count = collections.defaultdict(int)

    for term in history:
        count[term['state']] += 1

    summary = {
        "earn_rate": (final_state['total'] - assets_jpy) / assets_jpy,
        "final_total": final_state['total'],
        "final_jpy": final_state["JPY"],
        "final_crypto_amount": final_state['crypto_amount'],
        **count
    }

    sub_path = f"#{id}_{crypto_name}_{assets_jpy:.1f}_coef[{'_'.join(map(str, coef))}]_{start.date()}_{end.date()}"
    if save:
        result_path = os.path.join(result_root, sub_path)
        if not os.path.exists(result_path):
            os.makedirs(result_path)

        history_file = os.path.join(result_path, "history.xlsx")
        plot_file = os.path.join(result_path, "plot.png")

        df_history = pd.DataFrame(history)
        df_history.to_excel(history_file)
        plot_history(df_history, plot_file)

    if verbose:
        print(f"Result of running: {sub_path}:")
        print(summary)

    return summary
Example #12
    def fit_directory(self,
                      path,
                      batch_size,
                      epochs,
                      val_path=None,
                      save_weights=False):
        train_generator = self._train_generator(path, batch_size)
        if val_path is None:
            validation_generator = None
            validation_steps = None
        else:
            validation_generator = self._test_val_generator(
                val_path, batch_size)
            validation_steps = validation_generator.samples / batch_size

        history = self.model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples / batch_size,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=validation_steps)
        utils.plot_history(history,
                           self.dump_path,
                           identifier='e' + str(epochs) + '_b' +
                           str(batch_size))
        with open(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_history.pklz'), 'wb') as f:
            cPickle.dump((history.epoch, history.history, history.params,
                          history.validation_data, self.model.get_config()), f,
                         cPickle.HIGHEST_PROTOCOL)
        if save_weights:
            self.model.save_weights(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_weights.h5'))
        return history
Example #13
def main():
    (X_train, y_train), (X_valid, y_valid), (X_test,
                                             y_test) = make_mnist_data()
    print(f'X_train: {X_train.shape}')
    print(f'X_valid: {X_valid.shape}')
    print(f'X_test  : {X_test.shape}')
    print(f'y_train: {y_train.shape}')
    print(f'y_valid: {y_valid.shape}')
    print(f'y_test: {y_test.shape}')

    model = make_model()

    history = model.fit(X_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_valid, y_valid))

    test_loss, test_acc = model.evaluate(X_test, y_test)
    print()
    print(f'Test loss: {test_loss}, test acc: {test_acc}')

    plot_history(history, 'his.jpg')
Example #14
def main():
    argv = sys.argv

    if len(argv) != 4:
        print('Usage: ' + argv[0] + ' model_name dataset epochs')
        sys.exit(0)

    model_name = argv[1]
    data = datautils.load(argv[2])

    normalized, mean, std = datautils.normalize(data)
    differentiated = datautils.differentiate(normalized)
    (train, test) = datautils.split(differentiated, 0.7)

    # utils.plot_data(data)

    print("training set length: {}".format(len(train)))
    print("test set length: {}".format(len(test)))
    """train"""
    model = LSTM()
    time_steps = 100  # window size
    batch_size = 8  # data augmentation
    history = model.train(model_name, train, int(argv[3]), batch_size,
                          time_steps)
    utils.plot_history(history)
    """test"""
    head = int(len(test) * 0.6)
    tail = len(test) - head
    projection = model.evaluate(model_name, test[:head], tail)
    """plot"""
    train = datautils.undifferentiate(train, normalized[0])
    test = datautils.undifferentiate(test, train[-1])
    projection = datautils.undifferentiate(projection, test[-1])
    testset_denorm = datautils.denormalize(test, mean, std)
    results_denorm = datautils.denormalize(projection, mean, std)
    utils.plot_multiple([testset_denorm, results_denorm], [0, head])
Example #15
            batch_size=32,
            class_mode='binary')
        validation_generator = test_datagen.flow_from_directory(
            config.validation_dir,
            target_size=(150, 150),
            batch_size=32,
            class_mode='binary')
        print(train_generator.class_indices)

        # Training
        history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=int(np.floor(2000 / 32)),
                                      epochs=50,
                                      validation_data=validation_generator,
                                      validation_steps=int(np.floor(800 / 32)))
        utils.plot_history(history)

        # Save the results
        model.save(os.path.join(config.result_dir, 'scratch_model.h5'))
        model.save_weights(
            os.path.join(config.result_dir, 'scratch_weights.h5'))
        utils.save_history(
            history, os.path.join(config.result_dir, 'scratch_history.txt'))

    except (KeyboardInterrupt, SystemExit):
        utils.unlock()
        utils.error(config.syserr)
    except LunaExcepion as e:
        utils.error(e.value)
        if (e.value == config.locked):
            exit()
Example #16
def train(train_dataset, valid_dataset, validation_bool, test_dataset,
          fam_dict_path, num_column, num_trains, num_tests, test_file_path,
          args):
    # load model
    model = rna_model.DeepRfam(seq_length=args.seq_length,
                               num_c=num_column,
                               num_filters=args.num_filters,
                               filter_sizes=args.filter_sizes,
                               dropout_rate=args.keep_prob,
                               num_classes=args.num_classes,
                               num_hidden=args.num_hidden)
    print(model.summary())

    # model compile
    model.compile(
        loss=args.loss_function,
        optimizer=getattr(optimizers, args.optimizer)(lr=args.learning_rate),
        metrics=['accuracy'])

    # start and record training history
    if validation_bool:
        train_history = model.fit_generator(train_dataset,
                                            epochs=args.num_epochs,
                                            verbose=1,
                                            validation_data=valid_dataset,
                                            use_multiprocessing=True,
                                            workers=6)
    else:
        train_history = model.fit_generator(train_dataset,
                                            epochs=args.num_epochs,
                                            verbose=1,
                                            use_multiprocessing=True,
                                            workers=6)

    # # test accuracy
    # t1 = time.time()
    # scores = model.evaluate_generator(test_dataset, steps=num_tests // args.batch_size + 1)
    # delta_t = time.time() - t1
    # print(f"Running time (Prediction):{delta_t} (s)\nAccuracy:{scores[1]}")
    # print(f"Running time (Prediction):{delta_t} (s)\nAccuracy:{scores[1]}")

    # =================================logging=============================================
    local_time = time.strftime("%m-%d_%H-%M", time.localtime())
    # determine log file name and `mkdir`
    if args.log_name is None:
        log_file_name = local_time
    else:
        log_file_name = local_time + '_' + args.log_name
    # os.system(f"mkdir -p {args.log_dir}/{log_file_name}")
    os.makedirs(f"{args.log_dir}/{log_file_name}")

    # save model to .h5 file
    model.save(f"{args.log_dir}/{log_file_name}/{log_file_name}.h5")

    # save the image of model structure
    plot_model(model,
               to_file=f"{args.log_dir}/{log_file_name}/model_structure.png",
               show_shapes=True)

    # save confusion matrix into .csv file
    # prediction = model.predict_generator(test_generator, workers=6, use_multiprocessing=True)
    prediction = model.predict_generator(
        test_dataset)  # don't use multiprocessing

    # get the list of true label
    with open(test_file_path) as f:
        label_list = []
        for line in f:
            line = line.strip()
            seq_index = line.split(',').pop(0)
            if seq_index != '':
                label_list.append(int(seq_index))
            else:
                pass

    prediction = prediction[:len(label_list)]
    prediction_1d = np.array([np.argmax(p) for p in prediction])
    # print("Length of true label:", len(label_list))
    # print("Length of predict label:", len(prediction_1d))
    utils.cm2csv(true_labels=label_list,
                 predicted_labels=prediction_1d,
                 dict_file=fam_dict_path,
                 save_dir=f"{args.log_dir}/{log_file_name}")
    print('Accuracy:', accuracy_score(label_list, prediction_1d))

    # generate the confusion matrix
    if args.num_classes <= 20:
        utils.plot_cm(true_labels=label_list,
                      predicted_labels=prediction_1d,
                      dict_file=fam_dict_path,
                      title=f'Confusion Matrix',
                      save_dir=f"{args.log_dir}/{log_file_name}")
    else:
        pass

    # draw and save history plot
    utils.plot_history(train_history, f"{args.log_dir}/{log_file_name}")

    # save the classification report
    utils.classification_report(true_labels=label_list,
                                predicted_labels=prediction_1d,
                                dict_file=fam_dict_path,
                                save_dir=f"{args.log_dir}/{log_file_name}",
                                std_out=True)

    # save history to .csv file
    with open(f"{args.log_dir}/history.csv", 'a') as csv:
        print(
            f'{local_time},{log_file_name},{args.dataset},{accuracy_score(label_list, prediction_1d)},{str(args.filter_sizes).replace(","," ")},{args.num_filters},{args.batch_size},{args.num_epochs},{args.keep_prob},{args.num_hidden},{args.learning_rate},{args.loss_function},{args.optimizer}, ',
            file=csv)
Example #17
        np.array(v_rel_labels).shape)
    x_train = [
        np.array(v_q_words),
        np.array(v_d_words),
        np.array(v_rel_labels)
    ]
    # print(np.array(x_train).shape)
    history = model.fit(x=x_train,
                        y=np.array(y_train),
                        batch_size=config_model_train["batch_size"],
                        epochs=config_model_train["epochs"],
                        verbose=config_model_train["verbose"],
                        shuffle=config_model_train["shuffle"])

    # save trained model
    print("Saving model and its weights ...")
    model.save_weights(config_model_train["weights"] + ".h5")
    # serialize model to JSON
    model_json = model.to_json()
    with open(
            join(config_model_train["train_details"],
                 config_model_param["model_name"] + ".json"),
            "w") as json_file:
        json_file.write(model_json)
    print("Saved model to disk.")

    print("Plotting history ...")
    plot_history(history, config_model_train["train_details"],
                 config_model_param["model_name"])
    print("Done.")
Example #18
def main(debug: bool = False, eager: bool = False, logdir: str = "runs"):

    if debug:
        import debugpy

        print("Waiting for debugger...")
        debugpy.listen(5678)
        debugpy.wait_for_client()

    current_time = datetime.now().strftime("%b%d_%H-%M-%S")
    logdir = os.path.join(logdir, current_time)

    X_train, _1, X_test, _2 = dataget.image.mnist(global_cache=True).get()

    print("X_train:", X_train.shape, X_train.dtype)
    print("X_test:", X_test.shape, X_test.dtype)

    class MLP(elegy.Module):
        """Standard LeNet-300-100 MLP network."""

        def __init__(self, n1: int = 300, n2: int = 100, **kwargs):
            super().__init__(**kwargs)
            self.n1 = n1
            self.n2 = n2

        def call(self, image: jnp.ndarray):
            image = image.astype(jnp.float32) / 255.0
            x = elegy.nn.Flatten()(image)
            x = elegy.nn.sequential(
                elegy.nn.Linear(self.n1),
                jax.nn.relu,
                elegy.nn.Linear(self.n2),
                jax.nn.relu,
                elegy.nn.Linear(self.n1),
                jax.nn.relu,
                elegy.nn.Linear(x.shape[-1]),
                jax.nn.sigmoid,
            )(x)
            return x.reshape(image.shape) * 255

    class MeanSquaredError(elegy.losses.MeanSquaredError):
        # we request `x` instead of `y_true` since we don't require labels in autoencoders
        def call(self, x, y_pred):
            return super().call(x, y_pred)

    model = elegy.Model(
        module=MLP(n1=256, n2=64),
        loss=MeanSquaredError(),
        optimizer=optax.rmsprop(0.001),
        run_eagerly=eager,
    )

    model.summary(X_train[:64])

    # Notice we are not passing `y`
    history = model.fit(
        x=X_train,
        epochs=20,
        batch_size=64,
        validation_data=(X_test,),
        shuffle=True,
        callbacks=[elegy.callbacks.TensorBoard(logdir=logdir, update_freq=300)],
    )

    plot_history(history)

    # get random samples
    idxs = np.random.randint(0, 10000, size=(5,))
    x_sample = X_test[idxs]

    # get predictions
    y_pred = model.predict(x=x_sample)

    # plot and save results
    with SummaryWriter(os.path.join(logdir, "val")) as tbwriter:

        figure = plt.figure(figsize=(12, 12))
        for i in range(5):
            plt.subplot(2, 5, i + 1)
            plt.imshow(x_sample[i], cmap="gray")
            plt.subplot(2, 5, 5 + i + 1)
            plt.imshow(y_pred[i], cmap="gray")

        # tbwriter.add_figure("AutoEncoder images", figure, 20)

    plt.show()

    print(
        "\n\n\nMetrics and images can be explored using tensorboard using:",
        f"\n \t\t\t tensorboard --logdir {logdir}",
    )
Example #19
def main():
    args = get_args()

    frozen_model, trainable_model, reminder_layers = get_base_models(args)
    num_classes = len(os.listdir(args.target_data_train_dir))

    if args.augment_images:
        train_datagen = image.ImageDataGenerator(
            rescale=1. / 255,
            rotation_range=40,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest',
            validation_split=0.1
            if args.target_data_valid_dir == None else 0.0)
    else:
        train_datagen = image.ImageDataGenerator(
            rescale=1. / 255,
            validation_split=0.1
            if args.target_data_valid_dir == None else 0.0)

    train_data = train_datagen.flow_from_directory(args.target_data_train_dir,
                                                   target_size=(299, 299),
                                                   shuffle=True,
                                                   batch_size=args.batch_size,
                                                   subset='training')

    # if validation directory is not set, use some training images for validation
    if args.target_data_valid_dir == None:
        valid_data = train_datagen.flow_from_directory(
            args.target_data_train_dir,
            target_size=(299, 299),
            shuffle=True,
            batch_size=args.batch_size,
            subset='validation')

    # construct rider model
    rider_model = construct_rider_model(trainable_model, frozen_model,
                                        reminder_layers, num_classes)

    if args.gpu_util > 1:
        # Employs multiple gpus for training if specified
        model = multi_gpu_model(rider_model, gpus=args.gpu_util)

    rider_model.compile('Adadelta',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

    if not args.target_data_valid_dir == None:
        valid_datagen = image.ImageDataGenerator(rescale=1. / 255)
        valid_data = valid_datagen.flow_from_directory(
            args.target_data_valid_dir,
            target_size=(299, 299),
            batch_size=args.batch_size)
        history = rider_model.fit_generator(train_data,
                                            epochs=args.max_epochs,
                                            validation_data=valid_data)
    else:
        history = rider_model.fit_generator(train_data,
                                            epochs=args.max_epochs,
                                            validation_data=valid_data)

    # Save all of the data for the run
    run_name = '{}_{}_rider_network_{}_total_{}_batch'.format(
        args.dataset_name, args.model_to_train, args.max_epochs,
        args.batch_size)

    if args.gpu_util > 1:
        # if using a multi gpu model, extract the appropriate layers
        rider_model = model.get_layer('model_1')

    rider_model.save(
        os.path.join(args.output_dir, '{}_ckpt.h5'.format(run_name)))
    try:
        pickle.dump(
            dict(history.history),
            open(
                os.path.join(args.output_dir, '{}_history.p'.format(run_name)),
                'wb'))
        print('Successfully dumped history file')
    except:
        print('History dump failed')
    plot_history(history, os.path.join(args.output_dir, run_name), True)
Example #20
    x_train = x_train[:(x_train.shape[0] - (x_train.shape[0] % seq_length))]
    y_train = y_train[:(y_train.shape[0] - (y_train.shape[0] % seq_length))]
    x_test = x_test[:(x_test.shape[0] - (x_test.shape[0] % seq_length))]
    y_test = y_test[:(y_test.shape[0] - (y_test.shape[0] % seq_length))]
    x_train = x_train.reshape((-1, seq_length, 84, 84, 1))
    y_train = y_train.reshape((-1, seq_length, 128))
    x_test = x_test.reshape((-1, seq_length, 84, 84, 1))
    y_test = y_test.reshape((-1, seq_length, 128))

print(x_train.shape, y_train.shape)

model = model_type(layer_sizes=layer_sizes,
                   model_type=model_type,
                   seq_length=seq_length).build()
model.summary()

# sgd = optimizers.SGD(learning_rate=0.0001, momentum=0.0, nesterov=False)
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])

history = model.fit(x_train,
                    y_train,
                    validation_data=(x_test, y_test),
                    epochs=num_epochs,
                    batch_size=batch_size,
                    shuffle=True)

utils.save_model(model,
                 model_type,
                 model_path=os.path.join('./saved_model/', args.game_name))
utils.plot_history(history, '{}_{}'.format(args.game_name, args.model))
Example #21
             next_neighbors=8)([points_input, x])  # conv with fixed graph
x = layers.Activation("relu")(x)
y = EdgeConv(lambda a: kernel_nn(a, nodes=32),
             next_neighbors=16)([points_input, x])  # conv with fixed graph
x = layers.Activation("relu")(y)
x = layers.GlobalAveragePooling1D(data_format='channels_first',
                                  name="embedding")(x)
out = layers.Dense(2, name="classification", activation="softmax")(x)

model = keras.models.Model([points_input, feats_input], out)
model.summary()

model.compile(loss="binary_crossentropy",
              optimizer=keras.optimizers.Adam(3E-3, decay=1E-4),
              metrics=['acc'])

history = model.fit(train_input_data, y_train, batch_size=64, epochs=2)

fig = utils.plot_history(history.history)
fig.savefig("./history_sphere.png")

# Draw graph in each EdgeConv layer
fig = utils.get_edge_graph_in_model(test_input_data, model, sample_idx=test_id)
fig.savefig("./cr_sphere_dynamic.png")

# Draw contribution (class score) of each individual cosmic ray
utils.draw_signal_contribution(model,
                               test_input_data,
                               test_id=test_id,
                               path="./signal_contributions_of_cosmic_ray.png")
Example #22
import simulator
from utils import plot_history


init = {'victim': 0, 'predator': 2}
value_range = 3


def vp(config):
    impacts = dict(victim=0, predator=0)
    if config['victim'] < config['predator']:
        impacts['victim'] = -1
        impacts['predator'] = -1
    elif config['victim'] > config['predator']:
        impacts['victim'] = 1
        impacts['predator'] = 1
    else:
        impacts['victim'] = 1
        impacts['predator'] = -1
    return impacts


response = simulator.run("report", value_range, init, vp, nsteps=150)
print(response)
response = plot_history("report")
print(response)

Example #23
def train_model(output_path, param):
    batch_size = int(sys.argv[2])
    val_steps = 11407 // batch_size
    train_steps = 216708 // batch_size
    # val_steps = 35
    # train_steps = 677
    epochs = 10
    with Parallel_image_transformer('fashion_data/train_95-ac.txt',
                                    (batch_size, 224, 224, 3)) as train_gen:
        with Parallel_image_transformer('fashion_data/validation_95-ac.txt',
                                        (batch_size, 224, 224, 3)) as val_gen:
            # with Parallel_np_arrays_reader(os.path.join(btl_path, 'btl_train_npz.txt'), ['attr_cls'], 5) as train_gen:
            #     with Parallel_np_arrays_reader(os.path.join(btl_path, 'btl_validation_npz.txt'), ['attr_cls'], 5) as val_gen:
            log_path = os.path.join(output_path, 'model_train.csv')
            csv_log = CSVLogger(log_path, separator=';', append=False)
            filepath = os.path.join(
                output_path,
                "best_model-{epoch:03d}-{loss:.4f}-{val_loss:.4f}.h5")
            # early_stopper = EarlyStopping(min_delta=0.001, patience=10)
            checkpoint = ModelCheckpoint(filepath,
                                         monitor='loss',
                                         verbose=1,
                                         save_best_only=False,
                                         save_weights_only=False,
                                         mode='auto',
                                         period=1)
            lrate = LearningRateScheduler(step_decay)
            # callbacks_list = [csv_log, checkpoint, early_stopper]
            callbacks_list = [csv_log, checkpoint, lrate]

            model = create_model(False, (224, 224, 3), param)
            # model = create_model(True, (7, 7, 2048), param)
            # model = ResNet((160, 96, 3), 4, basic_block, repetitions=[2, 2, 2, 1])
            model.load_weights('output3/best_weights.hdf5', by_name=True)
            # model = load_model('output4/best_model-001-1.2537-1.3404.h5')
            for layer in model.layers:
                if layer.name not in {'pr_cls', 'conv_5_2'}:
                    layer.trainable = False
            print(model.summary())
            plot_model(model,
                       to_file=os.path.join(output_path, 'model.png'),
                       show_shapes=True,
                       show_layer_names=False)
            # ## Compile
            model.compile(#optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),
                          optimizer=Adam(lr=1e-3, decay=1e-5),
                          # optimizer=Adadelta(),
                          # loss='binary_crossentropy',
                          loss={'pr_attr': 'binary_crossentropy', 'pr_cls': 'categorical_crossentropy'},
                          # loss_weights=[1., 0.05],
                          metrics=['categorical_accuracy'])
            t_begin = datetime.datetime.now()
            ## Fit
            model.fit_generator(train_gen,
                                steps_per_epoch=train_steps,
                                epochs=epochs,
                                validation_data=val_gen,
                                validation_steps=val_steps,
                                verbose=1,
                                callbacks=callbacks_list)

    print(datetime.datetime.now())
    print('total_time: {}'.format(str(datetime.datetime.now() - t_begin)))
    print('model saved to: {}'.format(output_path))
    model.save(os.path.join(output_path, 'final_model.h5'))
    model.save_weights(os.path.join(output_path, 'final_weights.hdf5'))
    plot_history(output_path)
Example #24
    optimizer=optimizers.RMSprop(lr=0.003),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
model.summary()

# Implementation of early stopping
my_callback = EarlyStopping(monitor='val_accuracy',
                            patience=10,
                            restore_best_weights=True)

# Train model
batch_size = 128
epochs = 500
history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=[my_callback],
                    validation_split=0.2)

# Plotting
plot_history(history)

# Evaluate model
scores = model.evaluate(x_test, y_test)
print('Test loss: {} - Accuracy: {}'.format(*scores))

# Save the model
save_keras_model(model, "../deliverable/nn_task1.h5")
Example #25
def main(debug: bool = False, eager: bool = False):

    if debug:
        import debugpy

        print("Waiting for debugger...")
        debugpy.listen(5678)
        debugpy.wait_for_client()

    X_train, y_train, X_test, y_test = dataget.image.mnist(
        global_cache=True).get()

    print("X_train:", X_train.shape, X_train.dtype)
    print("y_train:", y_train.shape, y_train.dtype)
    print("X_test:", X_test.shape, X_test.dtype)
    print("y_test:", y_test.shape, y_test.dtype)

    class MLP(elegy.Module):
        """Standard LeNet-300-100 MLP network."""
        def __init__(self, n1: int = 300, n2: int = 100, **kwargs):
            super().__init__(**kwargs)
            self.n1 = n1
            self.n2 = n2

        def call(self, image: jnp.ndarray):

            image = image.astype(jnp.float32) / 255.0

            mlp = hk.Sequential([
                hk.Flatten(),
                hk.Linear(self.n1),
                jax.nn.relu,
                hk.Linear(self.n2),
                jax.nn.relu,
                hk.Linear(10),
            ])
            return dict(outputs=mlp(image))

    model = elegy.Model(
        module=MLP.defer(n1=300, n2=100),
        loss=[
            elegy.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                       on="outputs"),
            elegy.regularizers.GlobalL2(l=1e-4),
        ],
        metrics=elegy.metrics.SparseCategoricalAccuracy.defer(on="outputs"),
        optimizer=optix.rmsprop(1e-3),
        run_eagerly=eager,
    )

    history = model.fit(
        x=X_train,
        y=dict(outputs=y_train),
        epochs=100,
        steps_per_epoch=200,
        batch_size=64,
        validation_data=(X_test, dict(outputs=y_test)),
        shuffle=True,
    )

    plot_history(history)

    # get random samples
    idxs = np.random.randint(0, 10000, size=(9, ))
    x_sample = X_test[idxs]

    # get predictions
    y_pred = model.predict(x=x_sample)

    # plot results
    plt.figure(figsize=(12, 12))
    for i in range(3):
        for j in range(3):
            k = 3 * i + j
            plt.subplot(3, 3, k + 1)

            plt.title(f"{np.argmax(y_pred['outputs'][k])}")
            plt.imshow(x_sample[k], cmap="gray")

    plt.show()
Example #26
def main(args, data_dir, model_name, num, lr, epochs, batch_size=16, download_data=False):
    """
    Main function
    
    Args:
        data_dir: directory to download Pascal VOC data
        model_name: resnet18, resnet34 or resnet50
        num: model_num for file management purposes (can be any positive integer; your stored results will have this number as a suffix)
        lr: initial learning rate list [lr for resnet_backbone, lr for resnet_fc] 
        epochs: number of training epochs
        batch_size: batch size. Default=16
        download_data: Boolean. If True, downloads the entire 2012 Pascal VOC data as a tar to the specified data_dir.
        Set this to True only the first time you run it, then set it to False. Default False
        save_results: Store results (boolean). Default False
        
    Returns:
        test-time loss and average precision
        
    Example way of running this function:
        if __name__ == '__main__':
            main('../data/', "resnet34", num=1, lr = [1.5e-4, 5e-2], epochs = 15, batch_size=16, download_data=False, save_results=True)
    """
    

    
    # Initialize cuda parameters
    use_cuda = torch.cuda.is_available()
    np.random.seed(2019)
    torch.manual_seed(2019)
    device = torch.device("cuda" if use_cuda else "cpu")
    
    print("Available device = ", device)
    model = models.__dict__[args.arch]()
    for name, param in model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    #model.avgpool = torch.nn.AdaptiveAvgPool2d(1)

    #model.load_state_dict(model_zoo.load_url(model_urls[model_name]))
    checkpoint = torch.load(args.pretrained, map_location="cpu")
    state_dict = checkpoint['state_dict']
    for k in list(state_dict.keys()):
        # retain only encoder_q up to before the embedding layer
        if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
            # remove prefix
            state_dict[k[len("module.encoder_q."):]] = state_dict[k]
        # delete renamed or unused k
        del state_dict[k]
    msg = model.load_state_dict(state_dict, strict=False)
    assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}

    print("=> loaded pre-trained model '{}'".format(args.pretrained))
    num_ftrs = model.fc.in_features
    model.fc = torch.nn.Linear(num_ftrs, 20)
    model.fc.weight.data.normal_(mean=0.0, std=0.01)
    model.fc.bias.data.zero_()

    model.to(device)
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    print("optimized parameters",parameters)
    optimizer = optim.SGD([
            {'params': parameters, 'lr': lr, 'momentum': 0.9}
            ])
    
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 12, eta_min=0, last_epoch=-1)
    
    # Imagnet values
    mean=[0.457342265910642, 0.4387686270106377, 0.4073427106250871]
    std=[0.26753769276329037, 0.2638145880487105, 0.2776826934044154]
    
#    mean=[0.485, 0.456, 0.406]
#    std=[0.229, 0.224, 0.225]
    
    transformations = transforms.Compose([transforms.Resize((300, 300)),
#                                      transforms.RandomChoice([
#                                              transforms.CenterCrop(300),
#                                              transforms.RandomResizedCrop(300, scale=(0.80, 1.0)),
#                                              ]),                                      
                                      transforms.RandomChoice([
                                          transforms.ColorJitter(brightness=(0.80, 1.20)),
                                          transforms.RandomGrayscale(p = 0.25)
                                          ]),
                                      transforms.RandomHorizontalFlip(p = 0.25),
                                      transforms.RandomRotation(25),
                                      transforms.ToTensor(), 
                                      transforms.Normalize(mean = mean, std = std),
                                      ])
        
    transformations_valid = transforms.Compose([transforms.Resize(330), 
                                          transforms.CenterCrop(300), 
                                          transforms.ToTensor(), 
                                          transforms.Normalize(mean = mean, std = std),
                                          ])

    # Create train dataloader
    dataset_train = PascalVOC_Dataset(data_dir,
                                      year='2007',
                                      image_set='train', 
                                      download=download_data, 
                                      transform=transformations, 
                                      target_transform=encode_labels)
    
    train_loader = DataLoader(dataset_train, batch_size=batch_size, num_workers=4, shuffle=True)
    
    # Create validation dataloader
    dataset_valid = PascalVOC_Dataset(data_dir,
                                      year='2007',
                                      image_set='val', 
                                      download=download_data, 
                                      transform=transformations_valid, 
                                      target_transform=encode_labels)
    
    valid_loader = DataLoader(dataset_valid, batch_size=batch_size, num_workers=4)
    
    # Load the best weights before testing
    if not os.path.exists(args.log):
        os.mkdir(args.log)
    
    log_file = open(os.path.join(args.log, "log-{}.txt".format(num)), "w+")
    model_dir=os.path.join(args.log,"model")
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    log_file.write("----------Experiment {} - {}-----------\n".format(num, model_name))
    log_file.write("transformations == {}\n".format(transformations.__str__()))
    trn_hist, val_hist = train_model(model, device, optimizer, scheduler, train_loader, valid_loader, model_dir, num, epochs, log_file)
    torch.cuda.empty_cache()
    
    plot_history(trn_hist[0], val_hist[0], "Loss", os.path.join(model_dir, "loss-{}".format(num)))
    plot_history(trn_hist[1], val_hist[1], "Accuracy", os.path.join(model_dir, "accuracy-{}".format(num)))    
    log_file.close()
    
    #---------------Test your model here---------------------------------------
    # Load the best weights before testing
    print("Evaluating model on test set")
    print("Loading best weights")
    weights_file_path = os.path.join(model_dir, "model-{}.pth".format(num))
    assert os.path.isfile(weights_file_path)
    print("Loading best weights")

    model.load_state_dict(torch.load(weights_file_path))
    transformations_test = transforms.Compose([transforms.Resize(330), 
                                          transforms.FiveCrop(300), 
                                          transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                                          transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean = mean, std = std)(crop) for crop in crops])),
                                          ])
    
    
    dataset_test = PascalVOC_Dataset(data_dir,
                                      year='2007',
                                      image_set='test',
                                      download=download_data, 
                                      transform=transformations_test, 
                                      target_transform=encode_labels)
    
    
    test_loader = DataLoader(dataset_test, batch_size=batch_size, num_workers=0, shuffle=False)
    
    loss, ap, scores, gt = test(model, device, test_loader, returnAllScores=True)
        
    gt_path, scores_path, scores_with_gt_path = os.path.join(model_dir, "gt-{}.csv".format(num)), os.path.join(model_dir, "scores-{}.csv".format(num)), os.path.join(model_dir, "scores_wth_gt-{}.csv".format(num))
        
    utils.save_results(test_loader.dataset.images, gt, utils.object_categories, gt_path)
    utils.save_results(test_loader.dataset.images, scores, utils.object_categories, scores_path)
    utils.append_gt(gt_path, scores_path, scores_with_gt_path)
        
    utils.get_classification_accuracy(gt_path, scores_path, os.path.join(model_dir, "clf_vs_threshold-{}.png".format(num)))
        
    return loss, ap
Example #27
        fname = "bkg_" + tag + file + ".npz"
        temp = np.load(dataDir + fname)
        images = temp['images'][:, 1:]
        images = np.reshape(images, [len(images), 40, 40, 4])
        x_test = images[:, :, :, [0, 2, 3]]
        if (i == 0): predictionsB = model.predict(x_test)
        else:
            predictionsB = np.concatenate(
                [predictionsB, model.predict(x_test)])

    predictions = np.concatenate((predictionsE, predictionsB))
    true = np.concatenate(
        (np.ones(len(predictionsE)), np.zeros(len(predictionsB))))
    y_test = keras.utils.to_categorical(true, num_classes=2)

    utils.plot_history(history, plotDir, ['loss', 'accuracy'])

    print()
    print("Calculating and plotting confusion matrix")
    cm = utils.calc_cm(y_test, predictions)
    utils.plot_confusion_matrix(cm, ['bkg', 'e'], plotDir + 'cm.png')
    print()

    print("Plotting certainty")
    utils.plot_certainty(y_test, predictions, plotDir + 'certainty.png')
    print()

    precision, recall = utils.calc_binary_metrics(cm)
    print("Precision = TP/(TP+FP) = fraction of predicted true actually true ",
          round(precision, 5))
    print("Recall = TP/(TP+FN) = fraction of true class predicted to be true ",
Example #28
print('shapes train; test; val:')
print(data_train.shape)
print(data_test.shape)

# corrupt the center of the image
cropy = 40
cropx = 40
x_train_noisy = crop_image(data_train, cropx, cropy)
x_test_noisy = crop_image(data_test, cropx, cropy)
#x_val_noisy = crop_image(data_val, cropx, cropy)

x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
#x_val_noisy = np.clip(x_val_noisy, 0., 1.)

showOrigNoisy(data_test, x_test_noisy, num=4)

ae = Autoencoder()

if train:
    ae.treina_modelo(x_train_noisy, data_train, x_test_noisy, data_test)
    print(ae.history.history.keys())
    plot_history(ae.history)

c10test = ae.prever(x_test_noisy)

#print("c10test: {0}\nc10val: {1}".format(np.average(c10test), np.average(c10val)))

showOrigNoisyRec(data_test, x_test_noisy, c10test)
Example #29
    end = time.clock()
    colorprint(Color.BLUE, 'Done! Elapsed time: ' + str(end - start) + 'sec\n')
    colorprint(Color.BLUE, 'Saving the model into ' + model_identifier + '.h5 \n')
    model.save_weights(
        'dump/patch_models/' + model_identifier + '.h5')  # always save your weights after training or during training
    colorprint(Color.BLUE, 'Done!\n')
    if not os.path.exists('dump/patch_histories'):
        os.mkdir('dump/patch_histories')
    with open('dump/patch_histories/' + model_identifier + '_history.pklz', 'wb') as f:
        cPickle.dump(
            (history.epoch, history.history, history.params, history.validation_data, model.get_config()), f,
            cPickle.HIGHEST_PROTOCOL)

    # summarize history for accuracy
    plot_history(history, model_identifier, metric='acc', plot_validation=TRAIN_WITH_VALIDATION, path='dump/patch_histories/')
    # summarize history for loss
    plot_history(history, model_identifier, metric='loss', plot_validation=TRAIN_WITH_VALIDATION, path='dump/patch_histories/')

colorprint(Color.BLUE, 'Start generating BoVW representation...\n')
start = time.clock()

model_layer = Model(inputs=model.input, outputs=model.get_layer(LAYER).output)

CLASSES = np.array(CLASSES)

y_train = []
X_train = []
for cls in CLASSES:
    for imname in os.listdir(os.path.join(DATASET_DIR, 'train', cls)):
        im = Image.open(os.path.join(DATASET_DIR, 'train', cls, imname))
Example #30
loss_history = []
mu_awareness = np.ones((t_sample,))

mu_awareness = torch.tensor(mu_awareness, requires_grad=True).float().to(device)
data_generator = SampleDataPointsGenerator()

# %%
# ----------- TRAIN -------------
for i in range(1000):
  loss = network.step(mu_item=next(data_generator), mu_awareness=mu_awareness, train=True)
  # print(loss)

  loss_history.append(loss)

  if (i+1) % 500 == 0:
    [plot_history(np.array(loss_history)[:, u, :], title='unit {}'.format(u)) for u in range(unit_count)]

    loss_history = []

print("==================")

# %%
# ----------- TEST -------------
next_mu_item = next(data_generator)
for i in range(20):
  loss = network.step(mu_item=next_mu_item, mu_awareness=mu_awareness, train=False)
  next_mu_item = loss[0][6][0].detach().numpy()
  # plot_1d(loss[0][6].detach().numpy(), title="mu")
  # plot_1d(loss[0][7].detach().numpy(), title="mu_bar")
  # plot_1d(loss[0][8].detach().numpy(), title="mu_hat")
Example #31
    args = parser.parse_args()
    cifar_dir = args.cifar_root
    fig_path = args.fig_path
    validation_split = args.val_split
    batch_size = args.batch_size
    epochs = args.epochs
    weight_path = args.weight_path
    weight_decay = args.weight_decay
    lr = args.lr

    SEED = args.seed  # set random seed (default: 1234)

    # split train, val, test from `get_data` function
    train_loader, val_loader, test_loader = get_data(cifar_dir=cifar_dir, batch_size=batch_size, augment=True, validation_split=validation_split)

    # load model
    model = VGG_lite()
    # define loss
    loss = nn.CrossEntropyLoss()
    # train the model
    model, history = train(model, train_loader, val_loader, epochs, loss, batch_size, optimizer='adam', weight_decay=weight_decay, lr=lr)

    # save the model according to `weight_path` from the parser (default './weights/final.pth')
    torch.save(model.state_dict(), weight_path)

    plot_history(history, fig_path) # save figures

    acc, cm, cm_norm = evaluate(model, test_loader) # evaluate trained model
    plot_cm(cm, cm_norm, fig_path) # save confusion matrix figures
    print('Test Accuracy: {}%'.format(round(acc*100, 4))) # print the model test accuracy