コード例 #1
0
# Select feature scaling; an "np.log"-style value triggers the log transform
# below ("minmax" leaves the images untouched here).
config['scaling'] = "minmax"
if "np.log" in config['scaling']:
    images = np.log1p(images)

# Seed TF so weight init and shuffling are reproducible.
tf.random.set_seed(config['random_seed'])
# ================== Build and train model ==================
with tf.device(get_tf_device(20)):
    # Small dense baseline on the flattened 16x16 images (256 inputs).
    # (Removed unused local `padding`: no conv layers in this model.)
    model = Sequential()
    model.add(Dense(4, activation='relu', input_shape=(256, )))
    model.compile(
        loss='mse',
        optimizer='adam',
    )
    print(model.summary())

    # Run experiment: k-fold regression predicting positions of double events.
    experiment = Experiment(
        model=model,
        config=config,
        model_type="regression",
        experiment_name="generate_results_pos_double_linreg",
    )
    experiment.run_kfold(
        images[double_indices],
        normalize_position_data(positions[double_indices]),
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)
コード例 #2
0
# Log-scale the images only when the configured scaling asks for it
# ("minmax" leaves them untouched).
config['scaling'] = "minmax"
if "np.log" in config['scaling']:
    images = np.log1p(images)

# Seed TF for reproducible runs.
tf.random.set_seed(config['random_seed'])
# ================== Import Data ==================
with tf.device(get_tf_device(20)):
    # Minimal dense regressor over the flattened 16x16 images.
    model = Sequential([
        Dense(2, activation='relu', input_shape=(256, )),
    ])
    model.compile(optimizer='adam', loss='mse')
    print(model.summary())

    # K-fold regression: predict energies of double events.
    experiment = Experiment(
        model=model,
        config=config,
        model_type="regression",
        experiment_name="generate_results_energies_double_linreg",
    )
    experiment.run_kfold(
        images[double_indices],
        energies[double_indices],
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)
コード例 #3
0
                   input_shape=(16, 16, 1)))
        # Deeper conv stack: three 64-filter 3x3 conv layers with one
        # max-pool, then a dense head ending in a sigmoid for binary
        # classification.
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        # Run experiment: single train/val split using the indices computed
        # outside this view; images are normalized per split.
        experiment = Experiment(model=model,
                                config=config,
                                model_type="classification",
                                experiment_name=search_name)
        experiment.run(
            normalize_image_data(images[train_idx]),
            labels[train_idx],
            normalize_image_data(images[val_idx]),
            labels[val_idx],
        )
        experiment.save()
        # Record which hyperparameters this experiment id corresponds to
        # (presumably one search iteration per batch size — confirm against
        # the enclosing loop, which is outside this view).
        id_param[experiment.id] = {
            'batch_size': b_size,
        }
# Persist the experiment-id -> hyperparameter mapping for this search run
# as pretty-printed JSON under the repo's searches directory.
search_path = get_git_root() + "experiments/searches/"
out_file = search_path + search_name + ".json"
with open(out_file, "w") as fp:
    json.dump(id_param, fp, indent=2)
コード例 #4
0
# Seed TF so weight init and shuffling are reproducible.
tf.random.set_seed(config['random_seed'])
# ================== Build and train model ==================
with tf.device(get_tf_device(20)):
    # Small CNN: one same-padded 3x3 conv layer, flattened into a linear
    # head predicting the 2D (x, y) position of single events.
    # (Removed unused local `padding`: the Conv2D call passes the literal.)
    model = Sequential()
    model.add(
        Conv2D(8, kernel_size=3, activation='relu', input_shape=(16, 16, 1),
               padding='same')
    )
    model.add(Flatten())
    model.add(Dense(2, activation='linear'))
    model.compile(
        loss='mse',
        optimizer='adam',
    )
    print(model.summary())

    # Run experiment: k-fold regression on single-event positions
    # (first two position components only).
    experiment = Experiment(
        model=model,
        config=config,
        model_type="regression",
        experiment_name="generate_results_cnn_small",
    )
    experiment.run_kfold(
        images[single_indices],
        normalize_position_data(positions[single_indices])[:, :2],
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)
コード例 #5
0
    # "cnn_deep" variant: three same-padded 32-filter 3x3 conv layers,
    # flattened into a sigmoid output for binary classification.
    model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
    model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
    model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    models['cnn_deep'] = model

    # Run experiments: train every registered model on the full data set,
    # one Experiment per model keyed by its name.
    for k, m in models.items():
        print(m.summary())
        experiment = Experiment(model=m,
                                config=config,
                                model_type="classification",
                                experiment_name="full_training_classifier_" +
                                k)
        # Non-convolutional models ("logistic"/"dense") take the images
        # flattened to 256 features; conv models take them as-is.
        if "logistic" in k or "dense" in k:
            experiment.run(
                images.reshape(images.shape[0], 256),
                labels,
            )
        else:
            experiment.run(
                images,
                labels,
            )
        print("Outputting model:", k)
        experiment.save()
        # Path where the trained model is saved (use continues past this view).
        mpath = experiment.config['path_args']['models'] + experiment.id + ".h5"
コード例 #6
0
    # Minimal CNN classifier: a single same-padded 3x3 conv layer feeding a
    # sigmoid output for binary classification.
    model = Sequential()
    model.add(
        Conv2D(8,
               kernel_size=3,
               activation='relu',
               input_shape=images.shape[1:],
               padding='same'))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # Run experiment on the full data set.
    experiment = Experiment(model=model,
                            config=config,
                            model_type="classification",
                            experiment_name="full_training_cnn_small")
    experiment.run(
        images,
        labels,
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)

    # Append a provenance record (experiment id, script name, config repr)
    # to the shared models log. A context manager guarantees the file is
    # closed even if a write raises (the original leaked the handle then).
    lpath = experiment.config['path_args']['models'] + "models.log"
    with open(lpath, "a") as log:
        log.write(experiment.id + ":\n")
        log.write(os.path.basename(__file__) + "\n")
        log.write(repr(config) + "\n")
コード例 #7
0
# Log-scale the images when the configured scaling asks for it.
if "np.log" in config['scaling']:
    images = np.log1p(images)

# The pretrained VGG model expects 3-channel input, so stack the images
# onto themselves along the channel axis.
images = np.concatenate([images] * 3, axis=-1)

# Seed TF for reproducible runs.
tf.random.set_seed(config['random_seed'])
with tf.device(get_tf_device(20)):
    # Build model: pretrained VGG16 base plus a small dense head ending in
    # a sigmoid for binary classification.
    model = pretrained_model("VGG16", input_dim=(16, 16, 3))
    for units in (512, 512):
        model.add(Dense(units, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )

    # K-fold classification over the full data set.
    experiment = Experiment(
        model=model,
        config=config,
        model_type="classification",
        experiment_name="full_training_pretrained_vgg16",
    )
    experiment.run_kfold(
        images,
        labels,
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)
コード例 #8
0
# Seed TF so weight init and shuffling are reproducible.
tf.random.set_seed(config['random_seed'])
with tf.device(get_tf_device(20)):
    # Small dense network: 256 flattened pixels -> 8 relu units -> sigmoid
    # output for binary classification.
    model = Sequential()
    model.add(InputLayer(input_shape=(256, )))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # Run experiment: k-fold classification on the flattened images.
    experiment = Experiment(model=model,
                            config=config,
                            model_type="classification",
                            experiment_name="full_training_dense_small")
    experiment.run_kfold(
        images.reshape(images.shape[0], 256),
        labels,
    )
    experiment.save(save_model=True, save_indices=False)
    print("Finished experiment:", experiment.id)

    # Append a provenance record (experiment id, script name, config repr)
    # to the shared models log. A context manager guarantees the file is
    # closed even if a write raises (the original leaked the handle then).
    lpath = experiment.config['path_args']['models'] + "models.log"
    with open(lpath, "a") as log:
        log.write(experiment.id + ":\n")
        log.write(os.path.basename(__file__) + "\n")
        log.write(repr(config) + "\n")
コード例 #9
0
    # DSNT head on top of the feature map `x` (built outside this view).
    # Per the unpacking below, outputs is a pair (heatmaps, coords):
    # `model` trains on the coordinates only, while `prediction_model`
    # exposes both for inspection.
    outputs = DSNT()(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs[1])
    prediction_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer='adam',
        loss='mse',
    )
    # Compiled only so it can be saved/predicted with; it is never trained
    # directly — it shares layers (and thus weights) with `model`.
    prediction_model.compile(
        optimizer='adam',
        loss='mse',
    )
    print(model.summary())

    # Run experiment: regression on normalized single-event positions
    # (first two components).
    experiment = Experiment(model=model,
                            config=config,
                            model_type="regression",
                            experiment_name=search_name)
    experiment.run(
        normalize_image_data(images[single_indices]),
        normalize_position_data(positions[single_indices])[:, :2],
    )
    experiment.save()
    # Save the dual-output model alongside the experiment artifacts.
    mpath = experiment.config['path_args']['models'] + experiment.id + ".h5"
    prediction_model.save(mpath)
    # Dump heatmaps and coordinates predicted on the fold-0 validation set.
    # NOTE(review): prediction here uses raw (unnormalized) images, unlike
    # training above — confirm this is intentional.
    heatmaps, coords = prediction_model.predict(
        images[single_indices][experiment.indices['fold_0']['val_idx']])
    np.save("dsnt_heatmaps_pred.npy", heatmaps)
    np.save("dsnt_coords_pred.npy", coords)
    print("Finished experiment.")
    print("Name:", search_name)
    print("id:", experiment.id)
コード例 #10
0
    # CNN regressor: conv stack over 16x16 single-channel images with a
    # linear scalar output. `padding` is defined outside this view.
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu', input_shape=(16, 16, 1),
                     padding=padding))
    model.add(Conv2D(64, (3, 3), activation='relu', padding=padding))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding=padding))
    model.add(Conv2D(64, (3, 3), activation='relu', padding=padding))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(
        loss='mse',
        optimizer='adam',
    )
    print(model.summary())

    # Run experiment: regress the first energy component of single events
    # from normalized images.
    experiment = Experiment(
        model=model,
        config=config,
        model_type="regression",
        experiment_name=search_name
    )
    experiment.run(
        normalize_image_data(images[single_indices]),
        energies[single_indices, 0],
    )
    experiment.save()
    # Save the trained model next to the experiment artifacts.
    mpath = experiment.config['path_args']['models'] + experiment.id + ".h5"
    model.save(mpath)
コード例 #11
0
    # Classifier conv stack (first layers built outside this view): pool,
    # two 64-filter conv layers, then a dense head with sigmoid output.
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding=padding))
    model.add(Conv2D(64, (3, 3), activation='relu', padding=padding))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy']
    )

    # Run experiment: binary classification on the full data set.
    experiment = Experiment(
        model=model,
        config=config,
        model_type="classification",
        experiment_name="full_training_classifier"
    )
    experiment.run(
        images,
        labels,
    )
    experiment.save()
    # Save the trained model next to the experiment artifacts.
    mpath = experiment.config['path_args']['models'] + experiment.id + ".h5"
    model.save(mpath)
    print(experiment.id)
    # Append provenance (experiment id, script name, config repr) to the
    # shared models log.
    # NOTE(review): no `log.close()` is visible in this view — if it does
    # not follow below, the handle leaks; prefer a `with open(...)` block.
    lpath = experiment.config['path_args']['models'] + "models.log"
    log = open(lpath,"a")
    log.write(experiment.id + ":\n")
    log.write(os.path.basename(__file__)+"\n")
    log.write(repr(config) + "\n")