Code Example #1
File: intent_prediction.py Project: asadovsky/nn
def train_and_evaluate_model(hp):
    """Trains and evaluates a model."""
    hp.set(run_id=datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    _record_hyperparams(hp)
    for path in [_checkpoints_dir(hp.run_id), _logs_dir(hp.run_id)]:
        os.makedirs(path, exist_ok=True)

    if hp.word_emb.pretrained == "none":
        word2vec = {}
    elif hp.word_emb.pretrained == "glove":
        word2vec = embedding_utils.read_glove(hp.word_emb.dim, pruned=True)
    else:
        raise ValueError("unknown word_emb.pretrained: " + hp.word_emb.pretrained)

    v = Vocab()
    v.add_dataset(dataset_iter(TRAIN_FILENAME))
    v.add_words(word2vec.keys())

    d_train = Dataset(v, dataset_iter(TRAIN_FILENAME))
    d_test = Dataset(v, dataset_iter(TEST_FILENAME))

    # Drop rare words so that UNK appears in the training set.
    d_train.drop_rare_words(hp.drop_rare_words_freq)

    model = build_model(d_train, word2vec, hp)
    model.summary()  # summary() prints directly; wrapping it in print() adds a stray "None"

    x_train, y_train, x_test, y_test, history = train_model(
        model, d_train, d_test, hp)

    plotting.plot_history(history, filepath=_plot_history_filepath(hp))
    evaluate_model("train", model, d_train, x_train, y_train, hp)
    evaluate_model("test", model, d_test, x_test, y_test, hp)
Code Example #2
File: nn.py Project: delbalso/Neural-Nets-tinkering
def main():
    nn = NN([None, layers.ConvolutionalLayer(10, 10, 28, 28, 5, list()),
             layers.FullyConnectedLayer(10, 19 * 19, list(), unit=neuron.Logistic())])
    #nn = NN([layers.FullyConnectedLayer(784,784,list()), layers.ConvolutionalLayer(10,10,28,28,5,list()), layers.FullyConnectedLayer(10, 19*19,list(),unit=neuron.Logistic())])
    #nn = NN([layers.FullyConnectedLayer(784, 784, list()), layers.FullyConnectedLayer(28, 784, list()), layers.FullyConnectedLayer(10, 28, list(), unit=neuron.Logistic())])
    # Read in data. (Older CSV-based pipeline, kept disabled:)
    # raw_data = pd.read_csv(
    #     "/Users/delbalso/projects/nn1/data/handwriting.csv",
    #     sep=",",
    #     header=None)
    # raw_data = raw_data.reindex(np.random.permutation(raw_data.index))
    # data = np.array(raw_data.transpose())
    # num_labels = 10
    # num_test_data = int(data.shape[1] * 0.2)
    # features = data[:-num_labels, :]  # num_features x num_examples
    # labels = data[-num_labels:, :]  # num_labels x num_examples
    # weights = train(labels[:, :-num_test_data],
    #                 features[:, :-num_test_data], nn)
    # test(labels[:, -num_test_data:], features[:, -num_test_data:], weights, nn)
    training_data, validation_data, test_data = mnist.load_data_wrapper_1()
    random.shuffle(training_data)
    training_features, training_labels = zip(*training_data[:500])
    training_data = MLDataSet(
        np.squeeze(training_features).transpose(),
        np.squeeze(training_labels).transpose())
    validation_features, validation_labels = zip(*validation_data[:])
    validation_data = MLDataSet(
        np.squeeze(validation_features).transpose(),
        np.squeeze(validation_labels).transpose())
    test_features, test_labels = zip(*test_data)
    test_data = MLDataSet(
        np.squeeze(test_features).transpose(),
        np.squeeze(test_labels).transpose())

    #hyperparam_search(1, 10, 1, 100, nn, training_data, validation_data)
    train(training_data, nn, 30, 10, validation_data=validation_data)

    test(test_data, nn)
    # The training_*/validation_* history lists are assumed to be
    # module-level globals populated by train().
    plot.plot_history(training_accuracy_history, validation_accuracy_history,
                      training_cost_history, validation_cost_history)
Code Example #3
def mccv(x, y, model, plot=(), epochs=10, batch_size=10, iterations=500):
    """
    :param plot: To plot something, the first value of "plot" has to be the title of the plot, and the second has to
    be the name of the output file (without any extensions). These values are passed as a tuple.
    """
    auc = []  # ROC AUC.
    spec = [
    ]  # Proportion of actual negatives that are correctly identified as such.
    sens = [
    ]  # Proportion of actual positives that are correctly identified as such.
    history = []  # List of history objects. Used for plotting.
    for _ in tqdm(
            range(iterations)
    ):  # Rewrite to "for _ in range(iterations)" to remove tqdm dependency.
        x_new, x_tst, y_new, y_tst = train_test_split(
            x, y, test_size=16, stratify=y)  # Held-out test data.
        x_tr, x_val, y_tr, y_val = train_test_split(
            x_new, y_new, test_size=0.3, stratify=y_new)  # Train and val data.
        history.append(
            model.fit(x_tr, y_tr,
                      validation_data=(x_val, y_val),
                      epochs=epochs,
                      batch_size=batch_size,
                      verbose=0))
        # Flatten the (n, 1) prediction array into a list of floats.
        predictions = [p[0] for p in model.predict(x=x_tst).tolist()]
        auc.append(roc_auc_score(y_tst, predictions))
        report = classification_report(
            y_tst, [0 if p < 0.5 else 1 for p in predictions],
            output_dict=True)
        spec.append(report["0"]["recall"])
        sens.append(report["1"]["recall"])

    print(ci(auc))
    print(ci(spec))
    print(ci(sens))

    if plot:
        # Average the per-iteration training curves before plotting.
        history_avg = deepcopy(history[0])
        for key in ("acc", "loss", "val_acc", "val_loss"):
            history_avg.history[key] = mean(
                [h.history[key] for h in history], axis=0)
        plot_history(plot[0], history_avg, plot[1])
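
A hedged usage sketch for the function above (x, y, and model are assumed to exist already; the title and file name are illustrative):

# 500 Monte Carlo cross-validation rounds; the averaged curves are written
# using the (title, filename) tuple described in the docstring.
mccv(x, y, model, plot=("MCCV averaged history", "mccv_curves"),
     epochs=10, batch_size=10, iterations=500)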
Code Example #4
def fine_tuning(preprocess, pretrained_vgg_model, debug=False):
    if debug:
        pretrained_vgg_model.summary()
    number_of_classes = 10
    pretrained_layers = pretrained_vgg_model.layers
    fine_tuned = list()
    for idx, num_samples in enumerate([100, 1000, 10000]):
        model = keras.Sequential()
        # Reuse every pretrained layer except the last Dense and its Activation.
        for pretrained_layer in pretrained_layers[:-2]:
            pretrained_layer.trainable = False
            model.add(pretrained_layer)

        # Adding the trainable layer and its activation
        model.add(Dense(number_of_classes))
        model.add(Activation('softmax'))

        if debug:
            model.summary()

        batch_sizes = (8, 32, 128)  # TODO: are you sure you want different?
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        x_train, _, y_train, _ = train_test_split(x_train,
                                                  y_train,
                                                  train_size=num_samples,
                                                  random_state=42,
                                                  stratify=y_train)
        x_train = preprocess(x_train)
        x_test = preprocess(x_test)
        y_train = keras.utils.to_categorical(y_train, number_of_classes)
        y_test = keras.utils.to_categorical(y_test, number_of_classes)

        batch_size = batch_sizes[idx]
        max_epochs = 200 if num_samples == 100 else 100
        learning_rate = 0.0001
        lr_decay = 1e-6
        lr_drop = 20

        def lr_scheduler(epoch):
            return learning_rate * (0.5**(epoch // lr_drop))

        reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)

        if debug:
            max_epochs = 3
            x_test = x_test[:20]
            y_test = y_test[:20]

        # Data augmentation. fit() computes std, mean, and principal
        # components only if the corresponding options are enabled.
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by dataset std
            samplewise_std_normalization=False,  # divide each input by its std
            # zca_whitening=False,  # apply ZCA whitening
            # rotation_range=15,  # randomly rotate images (degrees, 0 to 180)
            # width_shift_range=0.1,  # randomly shift horizontally (fraction of width)
            # height_shift_range=0.1,  # randomly shift vertically (fraction of height)
            # horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(x_train)

        # Optimization details. The SGD alternative is kept for reference:
        # sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
        optimizer = optimizers.Adam(lr=learning_rate, decay=lr_decay)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

        model.summary()

        history = model.fit_generator(
            datagen.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=max_epochs,
            validation_data=(x_test, y_test),
            # callbacks=[reduce_lr],
            verbose=1)
        model.save_weights(
            'cifar10vgg_finetuning_{}_trainset.h5'.format(num_samples))
        fine_tuned.append((model, history))
        plot_history(
            history,
            'Fine tuning with {} training samples'.format(num_samples))

    return fine_tuned
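
A hedged driver for the function above (preprocess and pretrained_vgg_model are assumed to be supplied by the surrounding project; the loop body is illustrative):

# Fine-tune the frozen VGG body on 100/1000/10000-sample subsets and
# inspect the recorded training histories.
results = fine_tuning(preprocess, pretrained_vgg_model, debug=True)
for model, history in results:
    print(sorted(history.history.keys()))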
Code Example #5
def collect():

    name_list = [
        "mnist",
        #"mnistcov"
    ]

    nr_of_epochs = 8000
    record_all_flag = False
    rec_test_flag = True
    learning_r = [0.0004]
    save_data_flag = False
    show_flag = True
    separate_flag = False
    save_MI_and_plot_flag = False

    bin_size_or_nr = [True]
    bins = [0.01, 0.07, 0.15, 0.3]

    color_list = [
        "red", "blue", "green", "orange", "purple", "brown", "pink", "teal",
        "goldenrod"
    ]

    models = [3, 4, 2, 1, 5, 6]

    for set_name in name_list:
        nrs = [3, 8, 1]
        #samples = "full"
        samples = 1600
        seed(1337)
        set_random_seed(1337)
        X_train, X_test, y_train, y_test = data_selection.select_data(
            set_name, shuffle=True, samples_per_class=samples, list_of_nrs=nrs)
        batch_size = [256, X_train.shape[0], 128, 512]

        print("calculations starting for: ", set_name)
        for i in models:
            if (i <= 4) and ("cov" in set_name):
                continue
            if (i > 4) and ("cov" not in set_name):
                continue

            for batch in batch_size:

                seed(1337)
                set_random_seed(1337)

                # objects to record parameters
                outputs = classes.Outputs()

                # Define and train the model, recording activations after
                # every epoch via a Keras LambdaCallback.
                output_recording = LambdaCallback(
                    on_epoch_end=lambda epoch, logs: Callbacks.record_activations(
                        outputs, model, epoch, X_train, X_test, y_test, batch,
                        record_all_flag, rec_test_flag))

                model, architecture = model_selection.select_model(
                    i, nr_of_epochs, set_name, X_train.shape, y_train)

                adam = optimizers.Adam(lr=learning_r[0])  # learning_r is a one-element list
                model.compile(loss="categorical_crossentropy",
                              optimizer=adam,
                              metrics=["accuracy"])

                history = model.fit(X_train,
                                    y_train,
                                    epochs=nr_of_epochs,
                                    batch_size=batch,
                                    validation_split=0.2,
                                    callbacks=[output_recording])
                # Final model score: evaluate() returns [loss, accuracy].
                score = model.evaluate(X_test, y_test, verbose=0)[1]

                # save data
                common_name = (architecture + "_lr_" + str(learning_r) +
                               "_batchsize_" + str(batch))
                if "mnist" in set_name:
                    common_name = str(samples) + str(nrs) + common_name

                aname = common_name + "_activations"
                outputs.model_score = score
                if save_data_flag:
                    util.save(outputs, aname)

                hname = common_name + "_history"
                h_obj = history.history
                h_obj["model_score"] = score
                if save_data_flag:
                    util.save(h_obj, hname)

                plotting.plot_history(h_obj, common_name, show_flag,
                                      save_MI_and_plot_flag)

                if rec_test_flag:
                    plotting.plot_test_development(outputs.int_model_score,
                                                   common_name, show_flag,
                                                   save_MI_and_plot_flag)

                # compute binning MI
                for flag in bin_size_or_nr:
                    for nr_of_bins in bins:
                        if flag and nr_of_bins > 1:
                            continue
                        if not flag and nr_of_bins < 1:
                            continue
                        seed(1337)
                        set_random_seed(1337)
                        est_type_flag = 1
                        info_plane.create_infoplane(common_name,
                                                    X_train,
                                                    y_train,
                                                    outputs,
                                                    est_type_flag,
                                                    color_list,
                                                    nr_of_bins,
                                                    flag,
                                                    show_flag,
                                                    separate_flag,
                                                    save_MI_and_plot_flag,
                                                    par_flag=False)

                seed(1337)
                set_random_seed(1337)
                # compute EDGE
                if batch == 256:
                    est_type_flag = 2
                    info_plane.create_infoplane(
                        common_name,
                        X_train,
                        y_train,
                        outputs,
                        est_type_flag,
                        color_list,
                        show_flag,
                        separate_flag,
                        save_flag=save_MI_and_plot_flag,
                        par_flag=True)

                seed(1337)
                set_random_seed(1337)
                # compute KDE upper
                est_type_flag = 3
                info_plane.create_infoplane(common_name,
                                            X_train,
                                            y_train,
                                            outputs,
                                            est_type_flag,
                                            color_list,
                                            show_flag,
                                            separate_flag,
                                            save_flag=save_MI_and_plot_flag,
                                            par_flag=False)
                seed(1337)
                set_random_seed(1337)
                # compute KDE lower
                if batch == 256:
                    est_type_flag = 4
                    info_plane.create_infoplane(
                        common_name,
                        X_train,
                        y_train,
                        outputs,
                        est_type_flag,
                        color_list,
                        show_flag,
                        separate_flag,
                        save_flag=save_MI_and_plot_flag,
                        par_flag=False)

                seed(1337)
                set_random_seed(1337)
                # compute KSG discrete
                est_type_flag = 5
                info_plane.create_infoplane(common_name,
                                            X_train,
                                            y_train,
                                            outputs,
                                            est_type_flag,
                                            color_list,
                                            show_flag,
                                            separate_flag,
                                            save_flag=save_MI_and_plot_flag,
                                            par_flag=False)
Code Example #6
    # mem_alloc sizes are in bytes: 4 bytes per float32 element.
    cuda_particles = cuda.mem_alloc(4 * N * (6 + 6 * MAX_LANDMARKS))
    cuda_measurements = cuda.mem_alloc(4 * 2 * MAX_MEASUREMENTS)
    cuda_cov = cuda.mem_alloc(4 * 4)

    # plt.pause(5)

    for i in range(u.shape[0]):
        print(i)

        if PLOT:
            plt.pause(0.05)
            ax[0].clear()
            ax[0].set_xlim([-5, 20])
            ax[0].set_ylim([-5, 20])
            plot_landmarks(ax[0], landmarks)
            plot_history(ax[0], real_position_history, color='green')
            plot_history(ax[0], predicted_position_history, color='orange')
            plot_particles_grey(ax[0], particles)

        vehicle.move_noisy(u[i])
        real_position_history.append(vehicle.position)

        FlatParticle.predict(particles, u[i], sigmas=movement_variance, dt=1)

        visible_measurements = sensor.get_noisy_measurements(vehicle.position[:2])

        particles = update(
            particles, THREADS, BLOCK_SIZE, visible_measurements, measurement_variance,
            cuda_particles, cuda_measurements, cuda_cov, THRESHOLD
        )
Code Example #7
File: 5_3_pretrained.py Project: stefandi94/dl_book
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(150, 150),
                                                        batch_size=BATCH_SIZE,
                                                        class_mode='binary')

model_2.compile(loss='binary_crossentropy',
                optimizer=optimizers.RMSprop(lr=2e-5),
                metrics=['acc'])

history = model_2.fit_generator(train_generator,
                                steps_per_epoch=N_TRAIN_IMAGES // BATCH_SIZE,
                                epochs=10,
                                validation_data=validation_generator,
                                validation_steps=50)
plot_history(history)

conv_base.trainable = True

set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False

model_2.compile(loss='binary_crossentropy',
                optimizer=optimizers.RMSprop(lr=1e-5),
                metrics=['acc'])
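
Several of these examples call a project-local plot_history(history) on a Keras History object. A minimal sketch of such a helper, assuming the old-style 'acc'/'val_acc' metric keys used above (each project ships its own variant, some with extra title or file-path parameters):

import matplotlib.pyplot as plt

def plot_history(history):
    # Sketch only: plot training/validation accuracy and loss per epoch.
    acc, val_acc = history.history['acc'], history.history['val_acc']
    loss, val_loss = history.history['loss'], history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    plt.figure()
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.legend()
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.legend()
    plt.show()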
Code Example #8
def run_cem(env_id,
            epochs=50,
            batch_size=4096,
            elite_frac=0.0625,
            randomize=True,
            only_success_elite=False,
            reward_type="sparse",
            env_index=0,
            single_obj_reward=-1,
            extra_std=2.0,
            extra_decay_time=10,
            num_process=8):

    now = datetime.now()
    time_now = now.strftime("%m-%d-%H-%M-%S")
    output_dir = './{}/{}/'.format(env_id, time_now)
    print("output_dir: ", output_dir)
    ensure_dir(output_dir)

    start = time.time()
    num_episodes = epochs * num_process * batch_size
    print('expt of {} total episodes'.format(num_episodes))

    num_elite = int(batch_size * elite_frac)
    history = defaultdict(list)

    z_dim = 4 * 12
    means = np.zeros(z_dim)
    stds = np.ones(z_dim)

    for epoch in tqdm(range(epochs)):
        print("current epoch number: ", epoch)
        extra_cov = max(1.0 - epoch / extra_decay_time, 0) * extra_std**2

        zs = np.random.multivariate_normal(
            mean=means,
            cov=np.diag(np.array(stds**2) + extra_cov),
            size=batch_size)

        with Pool(num_process) as p:
            returns_successes = p.map(
                partial(evaluate_z,
                        randomize=randomize,
                        reward_type=reward_type,
                        output_dir=output_dir,
                        epoch=epoch,
                        env_index=env_index,
                        single_obj_reward=single_obj_reward), zs)

        print(returns_successes)
        returns = [rs[0] for rs in returns_successes]
        successes = [rs[1] for rs in returns_successes]
        log_prob_sum = [rs[2] for rs in returns_successes]
        print("successes: ")
        print(successes)
        print("log prob sum: ")
        print(log_prob_sum)
        returns = np.array(returns)
        successes = np.array(successes)

        indexes = get_elite_indicies(num_elite, returns, successes,
                                     only_success_elite)

        elites = zs[indexes]

        means = elites.mean(axis=0)
        stds = elites.std(axis=0)

        history['epoch'].append(epoch)
        history['avg_ret'].append(np.mean(returns))
        history['std_ret'].append(np.std(returns))
        history['avg_ret_elites'].append(np.mean(returns[indexes]))
        history['std_ret_elites'].append(np.std(returns[indexes]))
        history['avg_suc'].append(np.mean(successes))
        history['std_suc'].append(np.std(successes))
        history['avg_suc_elites'].append(np.mean(successes[indexes]))
        history['std_suc_elites'].append(np.std(successes[indexes]))

        print('epoch {} - population returns: {} {} - elite returns: {} {}'.format(
            epoch, history['avg_ret'][-1], history['std_ret'][-1],
            history['avg_ret_elites'][-1], history['std_ret_elites'][-1]))

        print('epoch {} - population successes: {} {} - elite successes: {} {}'.format(
            epoch, history['avg_suc'][-1], history['std_suc'][-1],
            history['avg_suc_elites'][-1], history['std_suc_elites'][-1]))

        if True:  # originally "epoch % 5 == 0"; checkpoints every epoch for now
            end = time.time()
            expt_time = end - start
            plot_history(history, output_dir, epoch, expt_time)
            save_path_history = os.path.join(output_dir,
                                             "history_{}.npy".format(epoch))
            np.save(save_path_history, history)
            save_path_elites = os.path.join(output_dir,
                                            "elites_{}.npy".format(epoch))
            np.save(save_path_elites, elites)

    end = time.time()
    expt_time = end - start
    print('expt took {:2.1f} seconds'.format(expt_time))

    plot_history(history, output_dir, epochs, expt_time)
    num_optimal = 5
    print('epochs done - evaluating {} best zs'.format(num_optimal))

    best_z_rewards = [
        evaluate_z(z,
                   randomize=randomize,
                   reward_type=reward_type,
                   output_dir=output_dir,
                   epoch=epoch,
                   env_index=env_index,
                   single_obj_reward=single_obj_reward)
        for z in elites[:num_optimal]
    ]
    print('best rewards - {} across {} samples'.format(best_z_rewards,
                                                       num_optimal))
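
get_elite_indicies is not shown above; a plausible implementation matching this call site (an assumption, not the project's code) ranks rollouts by return and can restrict the elite set to successful ones:

import numpy as np

def get_elite_indicies(num_elite, returns, successes, only_success_elite):
    # Assumption: ascending argsort by return, optionally filtered to
    # rollouts flagged as successful, then the top num_elite indices.
    order = np.argsort(returns)
    if only_success_elite:
        order = np.array([i for i in order if successes[i]])
    return order[-num_elite:]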
Code Example #9
File: main.py Project: terechsan/objectsegmentator
        EPOCH = 1
        train_losses, train_ious, valid_losses, valid_ious = [], [], [], []
        for metrics in net.train_and_validate(train_loader,
                                              valid_loader,
                                              args.epochs,
                                              optimizer=optimizer,
                                              transform_data=transform_data,
                                              denormalizer=denormalize):
            estimate_masked, train_loss, train_iou, valid_loss, valid_iou = metrics
            save_batch(
                estimate_masked,
                os.path.join(figure_dir, "estimates",
                             f"estimates-{EPOCH}.png"))
            train_losses.append(train_loss)
            train_ious.append(train_iou)
            valid_losses.append(valid_loss)
            valid_ious.append(valid_iou)
            EPOCH += 1
        fig = plot_history({
            "train_loss": train_losses,
            "train_iou": train_ious,
            "valid_loss": valid_losses,
            "valid_iou": valid_ious
        })
        fig.savefig(os.path.join(figure_dir, "output_figure.pdf"))
        plt.close(fig)
        torch.save(net.state_dict(), os.path.join(figure_dir, "model.pth"))
        torch.save(net, os.path.join(figure_dir, "model_object.pt"))
    else:
        print(f"{option} not implemented yet")
Code Example #10
File: cem.py Project: huihanl/cem
def run_cem(env_id,
            epochs=10,
            batch_size=4096,
            elite_frac=0.2,
            extra_std=2.0,
            extra_decay_time=10,
            num_process=8):
    ensure_dir('./{}/'.format(env_id))

    start = time.time()
    num_episodes = epochs * num_process * batch_size
    print('expt of {} total episodes'.format(num_episodes))

    num_elite = int(batch_size * elite_frac)
    history = defaultdict(list)

    env, obs_shape, act_shape = setup_env(env_id)
    theta_dim = (obs_shape + 1) * act_shape
    means = np.random.uniform(size=theta_dim)
    stds = np.ones(theta_dim)

    for epoch in range(epochs):

        extra_cov = max(1.0 - epoch / extra_decay_time, 0) * extra_std**2

        thetas = np.random.multivariate_normal(
            mean=means,
            cov=np.diag(np.array(stds**2) + extra_cov),
            size=batch_size)

        with Pool(num_process) as p:
            rewards = p.map(partial(evaluate_theta, env_id=env_id), thetas)

        rewards = np.array(rewards)

        indicies = get_elite_indicies(num_elite, rewards)
        elites = thetas[indicies]

        means = elites.mean(axis=0)
        stds = elites.std(axis=0)

        history['epoch'].append(epoch)
        history['avg_rew'].append(np.mean(rewards))
        history['std_rew'].append(np.std(rewards))
        history['avg_elites'].append(np.mean(rewards[indicies]))
        history['std_elites'].append(np.std(rewards[indicies]))

        print('epoch {} - {:2.1f} {:2.1f} pop - {:2.1f} {:2.1f} elites'.format(
            epoch, history['avg_rew'][-1], history['std_rew'][-1],
            history['avg_elites'][-1], history['std_elites'][-1]))

    end = time.time()
    expt_time = end - start
    print('expt took {:2.1f} seconds'.format(expt_time))

    plot_history(history, env_id, num_episodes, expt_time)
    num_optimal = 3
    print('epochs done - evaluating {} best thetas'.format(num_optimal))

    best_theta_rewards = [
        evaluate_theta(theta, env_id, monitor=True)
        for theta in elites[:num_optimal]
    ]
    print('best rewards - {} across {} samples'.format(best_theta_rewards,
                                                       num_optimal))
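
For this simpler variant, the same unseen helper plausibly reduces to a top-k argsort (again an assumption about code not shown here):

import numpy as np

def get_elite_indicies(num_elite, rewards):
    # Assumption: indices of the num_elite highest-reward thetas.
    return np.argsort(rewards)[-num_elite:]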
Code Example #11
                   np.tile([0.0, 0.7], (14, 1))))

    # s_history = [s]
    ss_history = [ss]
    slam_history = [get_mean(particles)]

    for i in range(68):
        print(i)

        plt.pause(0.01)

        ax.clear()
        ax.set_xlim([0, 17])
        ax.set_ylim([0, 17])
        plot_landmarks(ax, landmarks)
        plot_history(ax, ss_history, color='green')
        plot_history(ax, slam_history, color='orange')
        # plot_connection(ax, ss, z_real[landmark_indices, :] + ss[:2])
        plot_particles_grey(ax, particles)

        # s = move_vehicle_exact(s, u[i], dt=1)
        ss = move_vehicle_stochastic(ss, u[i], dt=1, sigmas=[0.05, 0.05])
        # s_history.append(s)
        ss_history.append(ss)

        particles = predict(particles, u[i], sigmas=[0.05, 0.05], dt=1)
        # print(particles)

        # sigmas = [0.05, 0.05]  (assumed to be defined earlier in the full script)
        R = np.array([[sigmas[0], 0], [0, sigmas[1]]], dtype=float)  # np.float was removed in NumPy 1.24
Code Example #12
def show_history_test(filenames, path):
    for f in filenames:
        # Pickle files must be opened in binary mode ('rb'), not text mode.
        with open(path + f, 'rb') as pkl_fd:
            history = pickle.load(pkl_fd)
        plotting.plot_history(history, 'acc',
                              'Densenet121 - ' + f.split('_')[-3])
Code Example #13
                                                       class_mode='binary')
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=100,
                                  epochs=EPOCHS,
                                  validation_data=validation_generator,
                                  validation_steps=50,
                                  callbacks=callbacks_list)
else:
    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(150, 150),
                                                        batch_size=BATCH_SIZE,
                                                        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=BATCH_SIZE,
        class_mode='binary')
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=2 * N_TRAIN_IMAGES // BATCH_SIZE,
                                  epochs=20,
                                  validation_data=validation_generator,
                                  validation_steps=50,
                                  callbacks=callbacks_list)

model.save(osp.join(CONV_NETS_DIR, 'cats_and_dogs_small_1.h5'))
plot_history(history)