Exemplo n.º 1
0
def demo_configuration_space(neurons=None,
                             file_graph='fig/demo_graph',
                             file_hist='fig/demo_hist'):
    """Plot configuration spaces for three GLU injection levels, then run a
    rhythm experiment that steps through those levels and plot its history.

    Args:
        neurons: network to analyse; if None a fresh feeding-snail network
            is built per call.  (The original called create_feeding_snail()
            in the default expression, which is evaluated only once at
            definition time, so every call shared one network instance.)
        file_graph: path prefix for the configuration-space figures.
        file_hist: output path for the rhythm-history figure.
    """
    if neurons is None:
        neurons = create_feeding_snail()
    inj_0 = {'ACH': 0, 'GLU': 0}
    inj_1 = {'ACH': 0, 'GLU': 1}
    inj_3 = {'ACH': 0, 'GLU': 2}
    graph_0 = conf.make_configuration_space(neurons, inj_0)
    graph_1 = conf.make_configuration_space(neurons, inj_1)
    graph_3 = conf.make_configuration_space(neurons, inj_3)
    plt.plot_configuration_space(graph_0.states, graph_0.transitions,
                                 file_graph + "_0")
    plt.plot_configuration_space(graph_1.states, graph_1.transitions,
                                 file_graph + "_1")
    plt.plot_configuration_space(graph_3.states, graph_3.transitions,
                                 file_graph + "_3")
    # Hold each injection level for roughly a third of the experiment.
    injection = [inj_0] * 10 + [inj_1] * 11 + [inj_3] * 9
    T = len(injection)
    exp = nrn.Experiment("Test experiment", T, neurons, ['ACH', 'GLU'])
    exp.injection = injection
    hist, is_valid = nrn.generate_rhythm_recursive(exp)
    print(len(hist))
    print(is_valid)
    plot_history(hist, file_hist)
Exemplo n.º 2
0
def demo_recursive_activation():
    """Trace recursive activation branches for a half-center oscillator,
    then run a short recursive rhythm experiment and plot its history.
    """
    neurons = create_hco()
    # Seed the recursion with the activity pattern under zero injection.
    start_activity = [
        nrn.get_activities(neurons,
                           nrn._zeros_dict(nrn._list_transmitters(neurons)))
    ]
    branches = nrn._recursive_activation(neurons,
                                         start_activity,
                                         order=[],
                                         injection=[])
    for b in branches:
        print_branch(b)
    exp = nrn.Experiment("Test experiment", 10, neurons, ['ACH', 'GLU'])
    # generate_rhythm_recursive returns (history, is_valid); unpack so the
    # plot receives just the history (the original passed the whole tuple).
    hist, is_valid = nrn.generate_rhythm_recursive(exp)
    plot_history(hist)
Exemplo n.º 3
0
def demo_hco():
    """Demonstrate a half-center oscillator: print and plot its rhythm, then
    build and plot its configuration space with no transmitter injection.

    Returns:
        Tuple of (neurons, graph) — the fresh network and its
        configuration-space graph.
    """
    neurons = create_hco()
    neurons[1].activity_levels = (0, 0.92, 2)
    neurons[0].activity_levels = (0, 0.91, 2)
    exp = nrn.Experiment("Test experiment", 10, neurons, ['ACH', 'GLU'])
    hist = nrn.generate_rhythm(exp)
    print("Half-center oscillator:")
    nrn.print_rhythm_ascii(hist)
    plot_history(hist)

    # Rebuild an untouched network and explore its configuration space.
    neurons = create_hco()
    no_injection = {'ACH': 0, 'GLU': 0}
    graph = conf.make_configuration_space(neurons, no_injection)
    print(graph.states)
    print(graph.transitions)
    plt.plot_configuration_space(
        graph.states, graph.transitions, 'fig/hco_no_inj.gv',
        transmitter_colors={'ach': 'red', 'glu': 'green'})

    return neurons, graph
Exemplo n.º 4
0
def demo_feeding_cpg():
    """Drive the feeding-snail CPG through three phases (no injection, then
    ACH, then GLU), plot the rhythm history and the configuration space.
    """
    duration = 20
    T_INJ_ACH = 6
    T_INJ_GLU = 13
    # Bind the network to a name so it can be reused for the configuration
    # space below (the original referenced an undefined `neurons` there,
    # raising NameError).
    neurons = create_feeding_snail()
    exp = nrn.Experiment('Feeding CPG', duration, neurons, ['ach', 'glu'])
    inj = [{'ach': 0, 'glu': 0}] * duration
    FIXED_INJ_ACH = 1
    FIXED_INJ_GLU = 1
    for i in range(T_INJ_ACH, T_INJ_GLU):
        inj[i] = {'ach': FIXED_INJ_ACH, 'glu': 0}
    for i in range(T_INJ_GLU, duration):
        inj[i] = {'ach': 0, 'glu': FIXED_INJ_GLU}

    exp.injection = inj
    # generate_rhythm_recursive returns (history, is_valid) elsewhere in this
    # project; unpack so plot_history receives just the history.
    hist, is_valid = nrn.generate_rhythm_recursive(exp)
    plot_history(hist)
    # NOTE(review): other call sites pass a single injection dict to
    # make_configuration_space; here a list of dicts is passed — confirm.
    graph = conf.make_configuration_space(neurons, inj)
    plt.plot_configuration_space(graph.states, graph.transitions, "fig/snail")
Exemplo n.º 5
0
def main():
    """Benchmark four test functions with and without PCA.

    For each (function, bounds, use_pca) combination: run the optimizer for
    a fixed number of generations, record the wall-clock run time, pickle
    the results, and plot the convergence history.  Shuts the machine down
    when every benchmark has finished.
    """
    generations = 500
    for use_pca in [True, False]:
        for function, bounds in [(functions.FSphere, (-100, 100)),
                                 (functions.FRastrigin, (-5, 5)),
                                 (functions.FGrienwank, (-600, 600)),
                                 (functions.FRosenbrock, (-100, 100))]:
            start = timer()
            return_dict = run(use_pca, function, bounds[0], bounds[1],
                              generations)
            end = timer()
            return_dict.update({"run_time": (end - start)})
            # One base name per function/PCA combination, shared by the
            # pickle and the plot (the original computed this name but then
            # wrote to a literal placeholder path instead of using it).
            filename = f"benchmark/{function.__name__}{'_pca' if use_pca else ''}"
            with open(f"{filename}.pickle", 'wb') as handle:
                pickle.dump(return_dict,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
            plot.plot_history(return_dict['history']['min_val'], generations,
                              filename)
    # Windows-specific: power off once all benchmarks complete.
    os.system('shutdown -s -t 0')
Exemplo n.º 6
0
def train(EPOCHS=1000):
    """Build, fit, evaluate and save the regression model.

    Args:
        EPOCHS: number of training epochs.
    """
    model = _build_model()
    model.summary()

    fit_result = model.fit(train_data,
                           train_labels,
                           batch_size=100,
                           epochs=EPOCHS,
                           validation_split=0.2,
                           verbose=0,
                           callbacks=[NetworkCallback(EPOCHS)])

    # Tabulate the per-epoch metrics and show the last few rows.
    frame = pd.DataFrame(fit_result.history)
    frame['epoch'] = fit_result.epoch
    print(frame.tail())

    plot.plot_history(fit_result)

    loss, mae, mse = model.evaluate(test_data, test_labels, verbose=1)

    print("Testing set Mean Abs Error: {:5.2f} Score".format(mae))

    model.save('model.h5')
Exemplo n.º 7
0
def main():
    """Load the train/validation arrays, fit a simple model, then save the
    weights plus the learning-curve and architecture plots.
    """
    data_directory = ""

    def _load(name):
        # All .npy arrays live side by side under data_directory.
        return np.load(os.path.join(data_directory, name))

    x_train = _load("x_train.npy")
    y_train = _load("y_train.npy")
    x_valid = _load("x_valid.npy")
    y_valid = _load("y_valid.npy")

    # Pre-processing could go here (e.g. grayscale conversion on the last,
    # color-channel, dimension).

    # A simple linear model serves as the example architecture.
    model = create_model(x_train.shape[1:])
    settings = get_config('batch_size', 'epochs')

    history = model.fit(x_train,
                        y_train,
                        batch_size=settings['batch_size'],
                        epochs=settings['epochs'],
                        validation_data=(x_valid, y_valid),
                        verbose=1)

    # Save the trained weights under a timestamped name.
    now = datetime.now()
    name_string = f"model_{now.timestamp()}"
    model.save_weights(f"./models/{name_string}")
    print("created and saved the model")

    plot_history(history, f"{name_string}_curve")
    plot_model(model,
               to_file=os.path.join('./plots', f'{name_string}_model.png'))
Exemplo n.º 8
0
def main():
    """CLI entry point: train, predict and/or plot depending on flags.

    --do_train fits the model on train.csv via Trainer;
    --do_predict restores a checkpoint and writes predictions for test.csv;
    --do_plot renders the saved training history under --arch.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('--arch',
                        default="model",
                        help='architecture (model_dir)')
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_predict', action='store_true')
    parser.add_argument('--do_plot', action='store_true')
    parser.add_argument('--hidden_size', default=256, type=int)
    parser.add_argument('--batch_size', default=256, type=int)
    parser.add_argument('--max_epoch', default=10000, type=int)
    parser.add_argument('--lr', default=1e-3, type=float)
    parser.add_argument('--step_lr', default=0.5, type=float)
    parser.add_argument('--cuda', default=0, type=int)
    parser.add_argument('--ckpt',
                        type=int,
                        help='load pre-trained model epoch')
    args = parser.parse_args()

    if args.do_train:

        dataset = pd.read_csv("../../data/train.csv")
        dataset.drop("Id", axis=1, inplace=True)
        train_set, valid_set = train_test_split(dataset,
                                                test_size=0.1,
                                                random_state=73)
        # F1 is the column being predicted; F2..F9 are the input features.
        feature_for_training = ["F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]
        feature_for_prediction = ["F1"]

        train = preprocess_samples(train_set, feature_for_training,
                                   feature_for_prediction)
        valid = preprocess_samples(valid_set, feature_for_training,
                                   feature_for_prediction)

        trainData = FeatureDataset(train)
        validData = FeatureDataset(valid)

        device = torch.device(
            'cuda:%d' % args.cuda if torch.cuda.is_available() else 'cpu')
        max_epoch = args.max_epoch
        trainer = Trainer(device, trainData, validData, args)

        # Each epoch runs a training pass (True) then a validation pass (False).
        for epoch in range(1, max_epoch + 1):
            print('Epoch: {}'.format(epoch))
            trainer.run_epoch(epoch, True)
            trainer.run_epoch(epoch, False)

    if args.do_predict:

        dataset = pd.read_csv("../../data/test.csv")
        dataset.drop("Id", axis=1, inplace=True)
        feature_for_testing = ["F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]
        test = preprocess_samples(dataset, feature_for_testing)

        testData = FeatureDataset(test)

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = SimpleNet(input_size=9,
                          output_size=12,
                          hidden_size=args.hidden_size)
        # Restore the checkpoint saved at epoch --ckpt under --arch.
        model.load_state_dict(
            torch.load('%s/model.pkl.%d' % (args.arch, args.ckpt)))
        model.train(False)
        model.to(device)
        dataloader = DataLoader(dataset=testData,
                                batch_size=args.batch_size,
                                shuffle=False,
                                collate_fn=testData.collate_fn,
                                num_workers=4)
        trange = tqdm(enumerate(dataloader),
                      total=len(dataloader),
                      desc='Predict')
        prediction = []
        for i, (ft, _, y) in trange:
            b = ft.shape[0]
            # The target column is absent at test time; prepend a zero
            # placeholder so the input width matches training (9 features).
            missing_ft = torch.zeros(b, 1)
            all_ft = torch.cat([missing_ft, ft], dim=1)
            o_labels, _ = model(all_ft.to(device))
            o_labels = torch.argmax(o_labels, axis=1)
            prediction.append(o_labels.to('cpu').numpy().tolist())

        # Flatten the per-batch lists into one prediction list.
        prediction = sum(prediction, [])
        SubmitGenerator(prediction, "../../data/sampleSubmission.csv")

    if args.do_plot:
        plot_history("{file}/history.json".format(file=args.arch))
Exemplo n.º 9
0
def train(model, network_input, network_output):
    """Fit the network, checkpointing weights whenever the loss improves.

    Returns:
        The Keras History object produced by ``model.fit``.
    """
    # The checkpoint file name encodes the epoch number and loss value.
    filepath = "trained_models/weights-{epoch:02d}-{loss:.4f}.hdf5"
    callbacks_list = [
        ModelCheckpoint(filepath,
                        monitor='loss',
                        verbose=0,
                        save_best_only=True,
                        mode='min')
    ]

    return model.fit(network_input,
                     network_output,
                     epochs=200,
                     batch_size=64,
                     callbacks=callbacks_list)

if __name__ == '__main__':

    # Pre-processing of data
    notes = get_notes()
    # Vocabulary size: number of distinct notes in the corpus
    n_vocab = len(set(notes))

    # Data formation
    network_input, network_output = prepare_sequences(notes, n_vocab)
    # Model architecture
    model = create_network(network_input, n_vocab)
    # Training
    history = train(model, network_input, network_output)
    # Plotting
    plot_history(history)

Exemplo n.º 10
0
                         log_interval)
    with open('./{}/record_history.pkl'.format(experiment_folder),
              'wb') as pkl_file:
        pickle.dump(record_history, pkl_file)
        pkl_file.close()

    torch.save(model.state_dict(), trained_weight_file)
else:
    with open('./{}/record_history.pkl'.format(experiment_folder),
              'rb') as pkl_file:
        record_history = pickle.load(pkl_file)
        pkl_file.close()

    model.load_state_dict(torch.load(trained_weight_file))

plot_history(experiment_folder, record_history)

if embed_dim > 2:
    train_embeddings_baseline, train_labels_baseline, train_all_images = extract_embeddings_high_dim(
        train_loader, model, embed_dim)
    val_embeddings_baseline, val_labels_baseline, val_all_images = extract_embeddings_high_dim(
        test_loader, model, embed_dim)
else:
    train_embeddings_baseline, train_labels_baseline = extract_embeddings(
        train_loader, model)
    val_embeddings_baseline, val_labels_baseline = extract_embeddings(
        test_loader, model)
plot_embeddings(experiment_folder, 'train', train_embeddings_baseline,
                train_labels_baseline)
plot_embeddings(experiment_folder, 'test', val_embeddings_baseline,
                val_labels_baseline)
Exemplo n.º 11
0
def main():
    """Train and validate the chord-recognition model on the CE200 dataset.

    Loads and splits the dataset, windows each song into BATCH_LEN-frame
    samples, then runs EPOCH train/validation epochs over randomly sampled
    batches, checkpointing whenever the validation loss/accuracy improves
    and plotting history and attention figures each epoch.  Relies on many
    module-level configuration constants (SAMPLE_RATE, BATCH_LEN, EPOCH,
    MODEL_MODE, ...) and on module-global metrics/optimizer objects shared
    with train_step / valid_step.
    """

    # Save parameters first for records
    save_details()

    # Load dataset
    x_dataset, y_dataset = read_data(r"../customized_data/CE200",
                                     LOAD_SONG_AMOUNT, SAMPLE_RATE, HOP_LENGTH,
                                     MODEL_TARGET, PRED_MODE)
    # Split loaded datasets into training and validating purpose
    x_dataset_train = x_dataset[:int(len(x_dataset) * (1 - VALID_RATIO))]
    y_dataset_train = y_dataset[:int(len(y_dataset) * (1 - VALID_RATIO))]
    x_dataset_valid = x_dataset[int(len(x_dataset) * (1 - VALID_RATIO)):]
    y_dataset_valid = y_dataset[int(len(y_dataset) * (1 - VALID_RATIO)):]

    # Process datasets to data with input format
    x_train, y_train = [], []
    x_valid, y_valid = [], []

    progress_bar = tqdm(range(len(x_dataset_train)),
                        desc="Processing train data",
                        total=len(x_dataset_train),
                        ascii=True)
    # 'i' for song no.
    for i in progress_bar:
        # 'j' for frame no. in a song
        # Slide a BATCH_LEN-frame window over each song with DATASET_HOP stride.
        for j in range(0,
                       len(x_dataset_train[i]) - BATCH_LEN + 1, DATASET_HOP):
            x_train.append(x_dataset_train[i][j:j + BATCH_LEN])
            y_train.append(y_dataset_train[i][j:j + BATCH_LEN])

    progress_bar = tqdm(range(len(x_dataset_valid)),
                        desc="Processing valid data",
                        total=len(x_dataset_valid),
                        ascii=True)
    for i in progress_bar:
        for j in range(0,
                       len(x_dataset_valid[i]) - BATCH_LEN + 1, DATASET_HOP):
            x_valid.append(x_dataset_valid[i][j:j + BATCH_LEN])
            y_valid.append(y_dataset_valid[i][j:j + BATCH_LEN])

    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_valid = np.array(x_valid)
    y_valid = np.array(y_valid)

    # Global variable 'loss_criterion' for both training and validating step
    # Global variable 'optimizer', 'train_all_loss', 'train_mid_loss', 'train_all_acc' and 'train_mid_acc' for training step
    # Global variable 'valid_all_loss', 'valid_mid_loss', 'valid_all_acc' and 'valid_mid_acc' for validating step
    global loss_criterion, optimizer
    global train_all_loss, train_mid_loss, train_all_acc, train_mid_acc
    global valid_all_loss, valid_mid_loss, valid_all_acc, valid_mid_acc
    # reduction='none' keeps per-element losses so the custom train/valid
    # steps can aggregate them themselves.
    if PRED_MODE == 'quality_bitmap':
        loss_criterion = tf.keras.losses.MeanSquaredError(reduction='none')
    else:
        loss_criterion = tf.keras.losses.CategoricalCrossentropy(
            reduction='none')
    optimizer = tf.keras.optimizers.Adam(
        CustomSchedule(  # lr_schedule
            initial_lr=INITIAL_LR,
            warmup_steps=WARMUP_STEPS,
            decay_steps=DECAY_STEPS,
            decay_rate=DECAY_RATE,
            min_lr=MIN_LR,
        ))
    train_all_loss = tf.keras.metrics.Mean()
    train_mid_loss = tf.keras.metrics.Mean()
    train_all_acc = tf.keras.metrics.Mean()
    train_mid_acc = tf.keras.metrics.Mean()
    valid_all_loss = tf.keras.metrics.Mean()
    valid_mid_loss = tf.keras.metrics.Mean()
    valid_all_acc = tf.keras.metrics.Mean()
    valid_mid_acc = tf.keras.metrics.Mean()

    # Define the model & checkpoint
    # Four separate checkpoint managers, one per best-so-far criterion.
    global model
    model = MyModel(MODEL_TARGET, PRED_MODE, BATCH_LEN, DIM, DROPOUT, QKV_DIM,
                    N, NUM_HEADS, CONV_NUM, CONV_DIM)
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    min_all_loss_ckpt_manager = tf.train.CheckpointManager(
        ckpt, f"{CKPT_DIR}/min_all_loss/", max_to_keep=1)
    min_mid_loss_ckpt_manager = tf.train.CheckpointManager(
        ckpt, f"{CKPT_DIR}/min_mid_loss/", max_to_keep=1)
    max_all_acc_ckpt_manager = tf.train.CheckpointManager(
        ckpt, f"{CKPT_DIR}/max_all_acc/", max_to_keep=1)
    max_mid_acc_ckpt_manager = tf.train.CheckpointManager(
        ckpt, f"{CKPT_DIR}/max_mid_acc/", max_to_keep=1)

    # Average losses, accuracies and learning rate per epoch for plotting figures
    avg_train_all_losses = []
    avg_train_mid_losses = []
    avg_train_all_accs = []
    avg_train_mid_accs = []
    avg_valid_all_losses = []
    avg_valid_mid_losses = []
    avg_valid_all_accs = []
    avg_valid_mid_accs = []
    learning_rates = []
    print('')
    for epoch in range(EPOCH):

        # According to limited memory space (RAM),
        #   train the model based on part of data each epoch. (TRAIN_BATCH_LEN / total)
        # Batches are BATCH_SIZE-long contiguous slices at random start indices.
        train_batches = []
        random_train_idx = np.arange(len(x_train) - BATCH_SIZE)
        np.random.shuffle(random_train_idx)
        global TRAIN_BATCH_LEN
        # None means "use everything"; otherwise keep only the first
        # TRAIN_BATCH_LEN shuffled indices.
        if TRAIN_BATCH_LEN is None: TRAIN_BATCH_LEN = len(random_train_idx)
        else: random_train_idx = random_train_idx[:TRAIN_BATCH_LEN]
        progress_bar = tqdm(
            random_train_idx,
            total=len(random_train_idx),
            ascii=True,
            desc=f"Sampling random train batches " +
            f"({TRAIN_BATCH_LEN}/{len(x_train)-BATCH_SIZE}={TRAIN_BATCH_LEN/(len(x_train)-BATCH_SIZE)*100:.3f}%)"
        )
        for i in progress_bar:
            train_batches.append(
                (x_train[i:i + BATCH_SIZE], y_train[i:i + BATCH_SIZE]))

        # According to limited memory space (RAM),
        #   validate the model based on part of data each epoch. (VALID_BATCH_LEN / total)
        valid_batches = []
        random_valid_idx = np.arange(len(x_valid) - BATCH_SIZE)
        np.random.shuffle(random_valid_idx)
        global VALID_BATCH_LEN
        if VALID_BATCH_LEN is None or VALID_BATCH_LEN > len(random_valid_idx):
            VALID_BATCH_LEN = len(random_valid_idx)
        else:
            random_valid_idx = random_valid_idx[:VALID_BATCH_LEN]
        progress_bar = tqdm(
            random_valid_idx,
            total=len(random_valid_idx),
            ascii=True,
            desc=f"Sampling random valid batches " +
            f"({VALID_BATCH_LEN}/{len(x_valid)-BATCH_SIZE}={VALID_BATCH_LEN/(len(x_valid)-BATCH_SIZE)*100:.3f}%)"
        )
        for i in progress_bar:
            valid_batches.append(
                (x_valid[i:i + BATCH_SIZE], y_valid[i:i + BATCH_SIZE]))

        # Reset metrics
        train_all_loss.reset_states()
        train_mid_loss.reset_states()
        train_all_acc.reset_states()
        train_mid_acc.reset_states()
        valid_all_loss.reset_states()
        valid_mid_loss.reset_states()
        valid_all_acc.reset_states()
        valid_mid_acc.reset_states()

        # Train model
        print('')
        print(f"EPOCH {epoch+1:2d}/{EPOCH} [TRAIN]:")
        progress_bar = tqdm(enumerate(train_batches),
                            desc=f"{epoch+1:2d}/{EPOCH}",
                            total=len(train_batches),
                            ascii=True)
        for (i, (x_input, y_real)) in progress_bar:
            y_pred, attns_forward, attns_backward = train_step(x_input, y_real)
            if MODEL_MODE == 'seq2seq':
                progress_bar.set_description(
                    f"AL {train_all_loss.result():.4f} ML {train_mid_loss.result():.4f} "
                    +  # all loss & mid loss
                    f"AA {train_all_acc.result():.2f}% MA {train_mid_acc.result():.2f}% "
                    +  # all acc  & mid acc
                    f"LR {optimizer._decayed_lr('float32').numpy():.6f}")
                # f" | BATCH_LEN: {BATCH_LEN} | BATCH_SIZE: {BATCH_SIZE}")
            else:
                progress_bar.set_description(
                    f"{epoch+1:2d}/{EPOCH} TRAIN | " +
                    f"mid loss: {train_mid_loss.result():.4f} | " +
                    f"mid acc: {train_mid_acc.result():.2f}% | " +
                    f"lr: {optimizer._decayed_lr('float32').numpy():.10f}"
                )  # | ")  # +
                # f"BATCH_LEN: {BATCH_LEN} | BATCH_SIZE: {BATCH_SIZE}")
        # The 'all' metrics only exist in seq2seq mode.
        if MODEL_MODE == 'seq2seq':
            avg_train_all_losses.append(train_all_loss.result())
        avg_train_mid_losses.append(train_mid_loss.result())
        if MODEL_MODE == 'seq2seq':
            avg_train_all_accs.append(train_all_acc.result())
        avg_train_mid_accs.append(train_mid_acc.result())
        learning_rates.append(optimizer._decayed_lr('float32').numpy())

        # Validate model
        # os.system('cls')
        # print('')
        print(f"EPOCH {epoch+1:2d}/{EPOCH} [VALID]:")
        progress_bar = tqdm(valid_batches,
                            desc=f"{epoch+1:2d}/{EPOCH}",
                            total=len(valid_batches),
                            ascii=True)
        for (x_input, y_real) in progress_bar:
            y_pred = valid_step(x_input, y_real)
            if MODEL_MODE == 'seq2seq':
                progress_bar.set_description(
                    f"AL {valid_all_loss.result():.4f} ML {valid_mid_loss.result():.4f} "
                    +  # all loss & mid loss
                    f"AA {valid_all_acc.result():.2f}% MA {valid_mid_acc.result():.2f}%"
                )  # all acc  & mid acc
                # f" | BATCH_LEN: {BATCH_LEN} | BATCH_SIZE: {BATCH_SIZE}")
            else:
                progress_bar.set_description(
                    f"{epoch+1:2d}/{EPOCH} VALID | " +
                    f"mid loss: {valid_mid_loss.result():.4f} | " +
                    f"mid acc: {valid_mid_acc.result():.2f}%")  #  | " +
                # f"BATCH_LEN: {BATCH_LEN} | BATCH_SIZE: {BATCH_SIZE}")
        if MODEL_MODE == 'seq2seq':
            avg_valid_all_losses.append(valid_all_loss.result())
        avg_valid_mid_losses.append(valid_mid_loss.result())
        if MODEL_MODE == 'seq2seq':
            avg_valid_all_accs.append(valid_all_acc.result())
        avg_valid_mid_accs.append(valid_mid_acc.result())

        # Save the model checkpoint if this is the best one (with minimum loss or maximum accuracy).
        print('')
        if MODEL_MODE == 'seq2seq' and valid_all_loss.result() <= min(
                avg_valid_all_losses):
            print("Minimum all loss: improved! Model saved.")
            min_all_loss_ckpt_manager.save()
        else:
            print("Minimum all loss: did not improve, model not saved.")
        if valid_mid_loss.result() <= min(avg_valid_mid_losses):
            print("Minimum mid loss: improved! Model saved.")
            min_mid_loss_ckpt_manager.save()
        else:
            print("Minimum mid loss: did not improve, model not saved.")
        if MODEL_MODE == 'seq2seq' and valid_all_acc.result() >= max(
                avg_valid_all_accs):
            print("Maximum all acc : improved! Model saved.")
            max_all_acc_ckpt_manager.save()
        else:
            print("Maximum all acc : did not improve, model not saved.")
        if valid_mid_acc.result() >= max(avg_valid_mid_accs):
            print("Maximum mid acc : improved! Model saved.")
            max_mid_acc_ckpt_manager.save()
        else:
            print("Maximum mid acc : did not improve, model not saved.")
        print('')

        # Show the last validation batch's prediction next to ground truth.
        show_pred_and_truth(y_real, y_pred, PRED_MODE)

        print("Plotting history figure... ", end='')
        plot_history(
            CKPT_DIR, {
                'train_all_loss': avg_train_all_losses,
                'train_mid_loss': avg_train_mid_losses,
                'train_all_acc': avg_train_all_accs,
                'train_mid_acc': avg_train_mid_accs,
                'valid_all_loss': avg_valid_all_losses,
                'valid_mid_loss': avg_valid_mid_losses,
                'valid_all_acc': avg_valid_all_accs,
                'valid_mid_acc': avg_valid_mid_accs,
                'lr': learning_rates,
            })
        print("Done.")

        print("Plotting attention figures... ", end='')
        plot_attns(CKPT_DIR, attns_forward, attns_backward)
        print("Done.\n")

    return
Exemplo n.º 12
0
        avg_infection_rates[key] = avg_results[key][-1][2] / 1000
    return avg_infection_rates


def sample_variance(sample):
    """Return the unbiased (n-1 denominator) sample variance of *sample*.

    *sample* must support elementwise subtraction of a scalar and squaring
    (e.g. a NumPy array).  The original subtracted ``sample / len(sample)``
    (each element divided by n) instead of the sample mean; fixed here to
    subtract the mean.
    """
    mean = sum(sample) / len(sample)
    return sum((sample - mean)**2) / (len(sample) - 1)


def sample_average(sample):
    """Return the arithmetic mean of *sample*."""
    total = sum(sample)
    return total / len(sample)


def margin_of_error(sample, confidence=0.9):
    """Half-width of the two-sided *confidence* interval for the mean of
    *sample*, using the Student t distribution.
    """
    tail_prob = (1 - confidence) / 2
    dof = len(sample) - 1
    t_critical = stats.t.isf(tail_prob, dof)
    return (sample_variance(sample) / len(sample))**0.5 * t_critical


def load_90_results():
    """Unpickle and return the 90%-efficacy, 100-repetition results file."""
    with open('90ec_100rep.p', 'rb') as handle:
        return pickle.load(handle)


if __name__ == '__main__':
    results_90 = load_90_results()
    avg_results_90 = get_averaged_results(results_90)
    # One history plot per averaged scenario, titled by its key.
    for scenario, history in avg_results_90.items():
        plot.plot_history(history, str(scenario))
Exemplo n.º 13
0
def run(dimension,
        composition,
        duration,
        acceleration,
        speed,
        box_size,
        infection_range,
        probability,
        efficacy,
        save_name,
        repeat=1,
        show_graph=False,
        autoexit=True):
    """Run the epidemic particle simulation `repeat` times in a pygame window.

    Person states observed in this code: 1 = infected, state-1 people past
    `duration` frames move to state 2 (cured); states 0 and 3 can become
    infected, with state 3 resisting with probability `efficacy`
    (presumably vaccinated — confirm against the caller).

    Args:
        dimension: window side length in pixels; must divide evenly into
            box_size-wide sectors.
        composition: initial population count per state (index = state id).
        duration: frames an infection lasts before the person is cured.
        acceleration, speed: movement parameters for logic.rand_offset.
        box_size: side length of a grid sector used for neighbour lookups.
        infection_range: infection radius; must not exceed box_size.
        probability: per-contact infection probability.
        efficacy: chance a state-3 person resists an infection event.
        save_name: results directory name under 'results/'; falsy to skip saving.
        repeat: number of simulation runs.
        show_graph: plot the history once the infection dies out.
        autoexit: if False, the final run freezes on screen instead of exiting.

    Uses module-level pygame resources (colors, radius, fps, font1, font2,
    BLACK, QUIT).
    """
    assert infection_range <= box_size
    sectors, rem = divmod(dimension, box_size)
    assert rem == 0
    if save_name:
        target_dir = f'results/{save_name}'
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode([dimension, dimension])
    running = True
    for t in range(repeat):
        locations = logic.rand_loc(sum(composition), dimension)
        # Expand per-state counts into one state id per person.
        person_state = []
        for i, cnt in enumerate(composition):
            person_state += [i] * cnt
        person_state = np.asarray(person_state)
        time_of_infection = np.zeros(len(locations))
        prev_offsets = np.zeros((len(locations), 2))
        history = [composition]
        frame = 0
        stop_sim = False
        while running:
            for event in pygame.event.get():
                if event.type == QUIT:
                    running = False
            if stop_sim:
                continue
            screen.fill(BLACK)
            frame += 1

            # Cure anyone infected for more than `duration` frames
            # (boolean add moves state 1 -> 2).
            still_infected_filter = (person_state == 1) & (
                frame - time_of_infection <= duration)
            now_cured_filter = (person_state == 1) & (~still_infected_filter)
            person_state += now_cured_filter

            locations, prev_offsets = logic.rand_offset(
                locations, prev_offsets, dimension, acceleration, speed)

            for i, loc in enumerate(locations):
                color = colors[person_state[i]]
                pygame.draw.circle(screen, color, loc, radius)

            # Grid of currently-infected people for fast radius queries.
            infected = locations[person_state == 1]
            infected_grid = logic.create_grid(infected, sectors, box_size)
            for i, person in enumerate(locations):
                if person_state[i] in (0, 3):
                    cnt = logic.radius_count(person, infected_grid, sectors,
                                             box_size, infection_range)
                    spread = logic.spread(cnt, probability)
                    if spread:
                        # State 3 resists with probability `efficacy`.
                        if person_state[i] == 0 or random() > efficacy:
                            person_state[i] = 1
                            time_of_infection[i] = frame

            current_composition = tuple(
                np.count_nonzero(person_state == i) for i in range(4))
            history.append(current_composition)
            # Stop condition: no infected people remain.
            if history[-1][1] == 0:
                if show_graph:
                    plot_history(history)
                if save_name:
                    with open(f'results/{save_name}/{t}.p', 'wb') as f:
                        pickle.dump(history, f)
                if not autoexit and t == repeat - 1:
                    stop_sim = True
                else:
                    break
            clock.tick(fps)
            show_fps(screen, dimension, clock, font1)
            runinfo = save_name + '_' + str(t)
            show_runinfo(screen, runinfo, font2)
            show_composition(screen, dimension, current_composition, font2)
            pygame.display.update()
        if not running:
            break
Exemplo n.º 14
0
    async def run(self):
        """Run a 30-step multi-target tracking scenario.

        Simulates targets that appear, move and disappear over time, feeds
        noisy position reports to the LMB tracker, and renders three stacked
        subplots: tracks, estimated vs. true target counts, and OSPA/GOSPA
        metrics.
        """
        _LOGGER.debug("Starting application.")
        plt.figure(figsize=(30, 30))
        model = lmb.CV(0.9, 1.5)
        sensor = lmb.PositionSensor()
        sensor.lambdaB = 2
        sensor.pD = 0.8
        # Target state vectors look like [x, y, vx, vy] — positions are
        # advanced by target[2:] each step below.
        targets = [
            np.array([0.0, 0.0, 1, 0.5]),
            np.array([0.0, 10.0, 1, -0.5]),
        ]
        ntargets_true = []
        ntargets_verified = []
        ntargets = []
        plt.subplot(3, 1, 1)
        history = []
        # Reference point for cf.ne2ll coordinate conversion
        # (looks like a lat/lon pair — confirm).
        origin = np.array([58.3887657, 15.6965082])
        pre_enof_targets = 0
        ospa, gospa = [], []
        # NOTE(review): last_time is initialised to 0 and never updated, so
        # predict() always receives 0 — confirm intended.
        last_time = 0
        for k in range(30):
            # print()
            print("k:", k)
            if k > 0:
                sensor.lambdaB = 0.2
                await self.predict(model, k, last_time)
                tracker_targets = await self.get_targets()
                # print("Predicted: ", tracker_targets)
                # Constant-velocity motion: position += velocity.
                for target in targets:
                    target[0:2] += target[2:]
            # if k == 5:
                # targets.append(np.array([5.0, 5.0, 1.0, 0.0]))
            # Scripted births/deaths to exercise track management.
            if k % 7 == 0:
                targets.append(np.random.multivariate_normal(
                    np.array([k, 7.0, 0.0, 0.0]),
                    np.diag([1, 0.5, 1, 1])))
            if k % 9 == 1:
                del targets[-1]
            if k == 10:
                targets.append(np.array([k, -30.0, 1.0, -0.5]))
            # if k == 20:
                # targets.append(np.array([k, 0.0, 1.0, 4.0]))

            # One noisy position report per current target.
            reports = [lmb.GaussianReport(
                # np.random.multivariate_normal(t[0:2], np.diag([0.01] * 2)),  # noqa
                cf.ne2ll(t[0:2, np.newaxis], origin),
                np.eye(2) * 0.1, 0.01)
                       for i, t in enumerate(targets)]
            # lambdaB scaled to the number of reports not yet explained by
            # existing tracks (appears to act as a birth rate — confirm).
            sensor.lambdaB = max(0.2 * (len(reports) - pre_enof_targets), 0.01 * len(reports))
            await self.correct(sensor, reports, k)
            tracker_targets = await self.get_targets()
            history.append((k, tracker_targets))
            pre_enof_targets = await self.enof_targets()
            ntargets.append(pre_enof_targets)
            ntargets_verified.append(await self.nof_targets(0.7))
            ntargets_true.append(len(targets))
            ll_targets = [np.concatenate((cf.ne2ll(t[0:2], origin), t[2:])) for t in targets]
            ospa.append(self.tracker.ospa(ll_targets, 1, 2))
            gospa.append(self.tracker.gospa(ll_targets, 1, 1))
            plot.plot_scan(reports, origin)
            plt.plot([t[0] for t in targets],
                     [t[1] for t in targets],
                     marker='D', color='y', alpha=.5, linestyle='None')
            print("Nof clusters: ", self.tracker.nof_clusters)
        plot.plot_history(history, origin, covellipse=False, min_r=0.7)
        plt.xlim([-1, k + 3])
        plt.ylabel('Tracks')
        plt.subplot(3, 1, 2)
        plt.plot(ntargets, label='Estimate')
        plt.plot(ntargets_true, label='True')
        plt.plot(ntargets_verified, label='Verified', marker='*')
        plt.ylabel('# Targets')
        plt.legend(fancybox=True, framealpha=0.5, loc=4, prop={'size': 10})
        plt.xlim([-1, k + 3])
        plt.subplot(3, 1, 3)
        plt.plot(ospa, label="OSPA")
        plt.plot(gospa, label="GOSPA")
        plt.legend()
        plt.xlim([-1, k + 3])
Exemplo n.º 15
0
    if new_model:
        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=20,
                                      verbose=0,
                                      mode='min')
        mcp_save = ModelCheckpoint(path,
                                   save_best_only=True,
                                   monitor='val_loss',
                                   mode='min')

        model = create_network()
        final_model = model.fit(x_train,
                                y_train,
                                epochs=epochs,
                                verbose=0,
                                callbacks=[earlyStopping, mcp_save],
                                validation_data=(x_val, y_val))

        stopped_epoch = earlyStopping.stopped_epoch if earlyStopping.stopped_epoch != 0 else epochs
        print(f'Stopped epoch: {stopped_epoch}')

        plot_history(final_model, f'{configuration}')
        scores = model.evaluate(x_test, y_test, verbose=0)
        print(f'Mean accuracy %: {100 * scores[1]}')

    else:
        model = load_model(path)
        new_scores = model.evaluate(x_test, y_test, verbose=0)
        print(f'Mean accuracy %: {100 * new_scores[1]}')
Exemplo n.º 16
0
def driver(yaml_path):
    """Runs the cropping, verifying, training or testing functions based on the configuration provided.

    Args:
        yaml_path   (str): The filepath that contains all the configurations needed.
    """
    if not yaml_path.endswith('.yml'):
        print("Unexpected input detected. Proper usage:\"python main.py CONFIG_NAME.yml\"")
        return

    print(yaml_path)
    config_loaded = False

    if is_assume_config_folder:
        yaml_path = config_folder + yaml_path

    if os.path.isfile(yaml_path):
        with open(yaml_path, 'r') as stream:
            try:
                configs = yaml.safe_load(stream)
                config_loaded = True
            except yaml.YAMLError as exc:
                print(exc)
    else:
        print(f"{yaml_path} not found.")

    if config_loaded:
        is_valid_configs = config_checker.check(configs)
        if is_valid_configs:
            filepaths = configs['filepaths']
            data_types = ['train', 'val', 'test']
            data_paths = filepaths['data_paths']
            cropped_paths = filepaths['cropped_paths']
            instance_paths = filepaths['instances_paths']
            verify_progress_paths = filepaths['verify_progress_paths']

            print(f"data_paths:{data_paths}")
            print(f"cropped_paths:{cropped_paths}")
            print(f"instance_paths:{instance_paths}")
            print(f"verify_progress_paths:{verify_progress_paths}")

            for each in data_types:
                if cropped_paths[each] != None:
                    make_if_not_exist(cropped_paths[each])
            
            if configs['mode'] == "crop":
                selected_type = configs['crop_options']['dataset_choice']
                if selected_type in data_types:
                    image_preprocessor.crop_images_driver(instance_paths[selected_type], data_paths[selected_type], cropped_paths[selected_type])
                    print(f"{selected_type} Image cropping complete.")
                elif selected_type == "all":
                    for each in data_types:
                        image_preprocessor.crop_images_driver(instance_paths[each], data_paths[each], cropped_paths[each])
                        print(f"{each} Image cropping complete.")
                else:
                    print("Unexpected error in configs['crop_options']['dataset_choice'], this should have been caught by config_checker!")

            elif configs['mode'] == "verify":
                selected_type = configs['verify_options']['dataset_choice']
                if selected_type in data_types:
                    image_preprocessor.file_num_compare(instance_paths[selected_type], data_paths[selected_type], cropped_paths[selected_type], selected_type, 
                                                        configs['verify_options']['remove_extra'], configs['verify_options']['crop_missing'])
                elif selected_type == "all":
                    for each in data_types:
                        image_preprocessor.file_num_compare(instance_paths[each], data_paths[each], cropped_paths[each], each, 
                                                            configs['verify_options']['remove_extra'], configs['verify_options']['crop_missing'])
                else:
                    print("Unexpected error in configs['verify_options']['dataset_choice'], this should have been caught by config_checker!")

                if selected_type in data_types:
                    image_preprocessor.verify_jpg_driver(instance_paths[selected_type], data_paths[selected_type],
                        cropped_paths[selected_type], verify_progress_paths[selected_type], 
                        configs['verify_options']['reset_progress'], configs['verify_options']['verify_save_step'])
                    print(f"{selected_type} Verification complete.")
                elif selected_type == "all":
                    for each in data_types:
                        image_preprocessor.verify_jpg_driver(instance_paths[each], data_paths[each],
                                            cropped_paths[each], verify_progress_paths[each], 
                                            configs['verify_options']['reset_progress'], configs['verify_options']['verify_save_step'])
                        print(f"{each} Verification complete.")
                else:
                    print("Unexpected error in configs['verify_options']['dataset_choice'], this should have been caught by config_checker!")

            elif configs['mode'] == "train":
                make_if_not_exist(filepaths['models_path'])
                train_test_options = configs['train_test_options']
                make_if_not_exist(filepaths['models_path'] + train_test_options['model_name'])
                train.train_driver(train_test_options['model_name'], train_test_options['pretrained'], train_test_options['load_choice'],
                                    filepaths['models_path'], cropped_paths['train'], cropped_paths['val'], train_test_options['num_epochs'],
                                    train_test_options['batch_size'], train_test_options['image_size'], 
                                    train_test_options['num_workers'], train_test_options['use_num_worker_mult'])

            elif configs['mode'] == "test":
                train_test_options = configs['train_test_options']
                test.test_driver(train_test_options['model_name'], train_test_options['load_choice'], 
                            filepaths['models_path'], cropped_paths['test'], 
                            train_test_options['batch_size'], train_test_options['image_size'], 
                            train_test_options['num_workers'], train_test_options['use_num_worker_mult'])
            elif configs['mode'] == "plot":
                train_test_options = configs['train_test_options']
                plot.plot_history(train_test_options['model_name'], filepaths['models_path'])
            else:
                print("Unexpected error in configs['mode'], this should have been caught by config_checker!")
Exemplo n.º 17
0
    # NOTE(review): fragment of a larger reporting function — `logs`, `plan`,
    # `runs`, `config`, `result_repo_vm_root`, `baseline_results`, `from_date`,
    # `to_date`, `output_dir`, `prefix` and `pp` come from the enclosing scope,
    # outside this excerpt.
    last_run = None
    for l in logs:
        # Parse each log file into (run_id, results) and index by run id;
        # `last_run` ends up holding the id of the final log processed —
        # presumably logs are ordered chronologically (TODO confirm).
        run_id, results = parse.parse_run(
            os.path.join(result_repo_vm_root, plan, l))
        runs[run_id] = results
        last_run = run_id

    # figure out what benchmarks we should plot in the graph. We use the benchmarks that appeared in the last run
    benchmarks = [r['benchmark'] for r in runs[last_run]]

    print("Plan: %s" % plan)
    print("Runs: %s" % runs)
    print("Last run: %s" % last_run)
    print("Benchmarks: %s" % benchmarks)

    # figure out the baseline and get the result for the baseline
    plan_config = parse.get_config_for_plan(config, plan)
    # NOTE(review): prefer `is not None` here; left unchanged in this pass.
    assert plan_config != None, "Cannot get config for plan"
    baseline_builds = plan_config['baseline']
    print("Baseline: %s" % baseline_builds)

    # Normalize results against the configured baseline builds' execution times.
    baseline = plot.calculate_baseline(baseline_results, baseline_builds,
                                       "execution_times")
    pp.pprint(baseline)

    # plot
    # Render the execution-time history figure and save it as a standalone
    # HTML file named "<prefix>_<plan>_history.html" under `output_dir`.
    fig = plot.plot_history(runs, plan, benchmarks, from_date, to_date,
                            "execution_times", baseline)
    path = os.path.join(output_dir, "%s_%s_history.html" % (prefix, plan))
    fig.write_html(path)