Code Example #1
File: reg.py Project: tjacek/pcloud_utils
def apply_reg(in_path, nn_path, out_path):
    model = cnn.get_model(nn_path)

    def helper(frame_i):
        # Predict the (x, y) position of the region of interest.
        r_i = model.predict(frame_i)
        position = int(r_i[0][0]), int(r_i[0][1])
        # Crop the frame around the predicted position.
        return simple_cut(frame_i, position)

    frames.transform_template(in_path, out_path, helper)
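simple_cut and frames.transform_template are project helpers not shown above. A minimal illustrative simple_cut (the window size and the NumPy frame layout are assumptions, not the project's code) might crop a fixed window around the predicted position:

def simple_cut(frame, position, size=64):
    # Crop a size x size window centered on the predicted (x, y) position,
    # clamped so the window stays inside the frame bounds.
    x, y = position
    half = size // 2
    x0 = max(min(x - half, frame.shape[1] - size), 0)
    y0 = max(min(y - half, frame.shape[0] - size), 0)
    return frame[y0:y0 + size, x0:x0 + size]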
Code Example #2
File: svm.py Project: elohhim/SNR-2018
def extract_features_with_cnn(cnn_path):
    print(f"Extracting features using CNN: {cnn_path}")
    train_loader, test_loader = get_data_loaders()
    # load pytorch NN and use as feature extractor
    feature_extractor = get_model('densenet121', True)
    feature_extractor.load_state_dict(torch.load(cnn_path))
    feature_extractor.classifier = nn.Sequential()  # strip the head so the model emits features
    feature_extractor.to(device)
    feature_extractor.eval()
    Xs_train, ys_train, Xs_test, ys_test = [], [], [], []
    with torch.no_grad():
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            features = feature_extractor(inputs)  # call the module directly so hooks run
            Xs_train.append(features)
            ys_train.append(labels)
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            features = feature_extractor(inputs)
            Xs_test.append(features)
            ys_test.append(labels)
    X_train = torch.cat(Xs_train).to('cpu').numpy()
    y_train = torch.cat(ys_train).to('cpu').numpy()
    X_test = torch.cat(Xs_test).to('cpu').numpy()
    y_test = torch.cat(ys_test).to('cpu').numpy()
    return X_train, y_train, X_test, y_test
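Given the file name (svm.py), these features presumably feed an SVM. A minimal sketch of that step, with the checkpoint path and scikit-learn classifier choice as assumptions rather than the project's code:

from sklearn.svm import SVC

# 'weights.pth' is an illustrative checkpoint path.
X_train, y_train, X_test, y_test = extract_features_with_cnn('weights.pth')
clf = SVC(kernel='rbf')  # kernel choice assumed
clf.fit(X_train, y_train)
print('test accuracy:', clf.score(X_test, y_test))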
Code Example #3
def main():
    BATCH_SIZE = 32

    data_frame = pd.read_csv('data/sample/driving_log.csv',
                             usecols=[0, 1, 2, 3])

    # shuffle the data
    data_frame = data_frame.sample(frac=1).reset_index(drop=True)

    # 80-20 training validation split
    training_split = 0.8
    num_rows_training = int(data_frame.shape[0] * training_split)
    # iloc slicing is end-exclusive, so the two sets do not overlap
    training_data = data_frame.iloc[:num_rows_training]
    validation_data = data_frame.iloc[num_rows_training:]

    # Remove all zero angle data from training set to avoid straight driving bias
    # This is quite drastic but proved effective
    training_data = training_data[training_data.steering != 0]

    training_generator = get_generator(training_data, batch_size=BATCH_SIZE)
    validation_data_generator = get_generator(validation_data,
                                              batch_size=BATCH_SIZE,
                                              validation=True)

    model = cnn.get_model()

    model.summary()

    # Load weights from previous training if more training epochs are required
    #model.load_weights('model.h5')

    samples_per_epoch = (training_data.shape[0] * 8 // BATCH_SIZE) * BATCH_SIZE

    history = model.fit_generator(training_generator,
                                  validation_data=validation_data_generator,
                                  samples_per_epoch=samples_per_epoch,
                                  nb_epoch=8,
                                  nb_val_samples=validation_data.shape[0])

    print("Saving model weights and configuration file.")

    model.save_weights('model.h5', overwrite=True)
    with open('model.json', 'w') as outfile:
        outfile.write(model.to_json())  # to_json() already returns a JSON string

    plot_loss(history)

    # Save visualization of model
    output_dir = './outputs/'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    kerasplot(model, to_file=output_dir + 'model.png', show_shapes=True)
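get_generator is defined elsewhere in the project. A bare-bones illustration of the contract it must satisfy for fit_generator (the column layout and the absence of augmentation are simplifications; the real version multiplies samples by augmentation, hence the * 8 above):

import numpy as np
import cv2

def get_generator(data, batch_size=32, validation=False):
    # Yield (images, steering angles) batches forever, as fit_generator expects.
    while True:
        batch = data.sample(n=batch_size)
        images = np.array([cv2.imread(path) for path in batch.iloc[:, 0]])
        angles = batch['steering'].values
        yield images, angles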
Code Example #4
def get_model(model_name):
    # dsnet expects 47x47 patches; every other model takes 224x224 images.
    # Creating the placeholder once avoids a second placeholder that TensorFlow
    # would silently rename to 'input_tensor_1', and avoids shadowing input().
    shape = [1, 47, 47, 3] if model_name == 'dsnet' else [1, 224, 224, 3]
    input_tensor = tf.placeholder(tf.float32, shape, name='input_tensor')
    if model_name == 'cnn':
        output = cnn.get_model(input_tensor)
    elif model_name == 'dsnet':
        output = dsnet.hyp_net_inference(input_tensor)
    elif model_name == 'fc4-alex':
        output = fc4.get_model(input_tensor)
    elif model_name == 'fc4-squeeze':
        output = fc4_squeeze.create_convnet(input_tensor)
    elif model_name == 'fpc':
        output = fpc.get_model(input_tensor)
    elif model_name == 'ours_conv':
        output = ours.test_architecture2(input_tensor)
    else:
        output = ours.test_architecture2_no_param(input_tensor)
    output = tf.identity(output, name='output_tensor')
    return input_tensor, output
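The name='input_tensor' / name='output_tensor' tags make both ends of the graph easy to look up later, e.g. after freezing. Direct use in a TF 1.x session looks roughly like this (the zero image is a stand-in input):

import numpy as np

input_tensor, output = get_model('cnn')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    image = np.zeros((1, 224, 224, 3), dtype=np.float32)  # stand-in input
    prediction = sess.run(output, feed_dict={input_tensor: image})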
Code Example #5
    if binary:
        conf_pairs, conf_y = get_bin_pairs(conf)
    else:
        conf_pairs, conf_y = get_mult_pairs(conf)

    non_conf_pairs, non_conf_y = get_bin_pairs(non_conf, conf=False)

    # X = conf_pairs + non_conf_pairs
    # y = np.concatenate((conf_y, non_conf_y))

    train_cnn(model, conf_pairs, conf_y)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('conf_path', help='Path to conflict dataset.')
    parser.add_argument('non_conf_path', help='Path to non-conflict dataset.')
    parser.add_argument('-b', '--binary', action='store_true',
                        help='Use binary classification.')
    args = parser.parse_args()

    if not args.binary:
        NB_CLASSES = 5
    model = cnn.get_model(MAX_LEN, nb_classes=NB_CLASSES)

    conflicts = pd.read_csv(args.conf_path)
    non_conflicts = pd.read_csv(args.non_conf_path)
    prepare_data(model, conflicts, non_conflicts, binary=args.binary)
Code Example #6
if __name__ == '__main__':
    data_save_directory = constants.cnn_train_test_split_data_directory
    metadata_dict = json_tools.import_json_as_dict(constants.metadata_file)

    label_name = 'combination_label'
    path_to_profiles = constants.preprocessed_beam_profiles_directory + '/beam_profiles_run_{}_raw_downsized.npy'
    save_model_as = constants.cnn_model_saveas
    (x_train, y_train), (x_test, y_test) = \
        cnn.save_and_load_profiles.load_train_test_split_data(
            data_save_directory, metadata_dict, label_name, path_to_profiles)

    y_train_1h = cnn.preparatory_tools.one_hot(y_train, 2)
    y_test_1h = cnn.preparatory_tools.one_hot(y_test, 2)

    model = cnn.get_model(x_train.shape[-2:])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    lr_decay_callback = tf.keras.callbacks.LearningRateScheduler(
        cnn.cnn_tools.lr_decay, verbose=True)

    BATCH_SIZE = 64
    steps_per_epoch = x_train.shape[0] // BATCH_SIZE
    log_dir = constants.cnn_log_dir + datetime.datetime.now().strftime(
        "%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
    history = model.fit(x_train,
                        y_train_1h,
                        batch_size=BATCH_SIZE,
                        callbacks=[lr_decay_callback, tensorboard_callback])
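cnn.cnn_tools.lr_decay is project code not shown in the snippet. LearningRateScheduler simply calls a function of the epoch index, so a typical schedule (this exponential form is an assumption, not the project's code) would be:

def lr_decay(epoch):
    # Start at the compiled rate of 0.01 and halve it every 4 epochs (illustrative).
    return 0.01 * 0.5 ** (epoch // 4)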
Code Example #7
hyperparams = [learning_rates, epoch_counts, batch_sizes, dropout]

permutations = list(itertools.product(*hyperparams))

# Split the search grid into per-person shares of 45 permutations each.
marcel = permutations[:45]
mahfuza = permutations[45:90]
nigel = permutations[90:135]
lambert = permutations[135:]

X, y = get_input_data(train_file_path='train.json')
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=42)

for params in permutations[45:]:  # run every share except marcel's
    model = get_model(learning_rate=params[0], dropout=params[3])

    accuracies = []
    losses = []
    precision_scores = []
    recall_scores = []

    # Train and test model
    model.fit(X_train,
              y_train,
              epochs=params[1],
              verbose=1,
              batch_size=params[2])
    model.save('hyperparams_{}_{}_{}_{}'.format(*params))
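The four metric lists are left empty in this excerpt. A hedged sketch of the evaluation step that would fill them (model.evaluate returning (loss, accuracy) assumes the model was compiled with an accuracy metric, and a binary sigmoid output is assumed for the sklearn scores):

from sklearn.metrics import precision_score, recall_score

loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
y_pred = (model.predict(X_test) > 0.5).astype(int)

losses.append(loss)
accuracies.append(accuracy)
precision_scores.append(precision_score(y_test, y_pred))
recall_scores.append(recall_score(y_test, y_pred))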
Code Example #8
File: model.py Project: tigerinus/BehavioralCloning
            if len(batched_images) == batch_size:
                yield (np.array(batched_images),
                       np.array(batched_measurements))
                batched_images = []
                batched_measurements = []


if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--resume', action='store_true')
    ARGS = PARSER.parse_args()

    print("Building model...", flush=True)

    MODEL = get_model(IMAGE_SHAPE)
    plot_model(MODEL, to_file='model.png')

    MODEL.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

    print("Training...", flush=True)
    RECORDS = get_records(DATA_PATH_LIST)

    if ARGS.resume:
        print("Resuming previous training...")
        MODEL.load_weights('model.h5')

    CALLBACKS = [
        EarlyStopping(verbose=1, patience=2),
        ModelCheckpoint(filepath='model.new.h5', save_best_only=True),
        TensorBoard(write_images=True),
    ]
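The excerpt cuts off after the callback list; training would then hand the batch generator and CALLBACKS to Keras, roughly as follows (the generator name, batch size, and epoch count are assumptions):

    HISTORY = MODEL.fit_generator(batch_generator(RECORDS, batch_size=128),
                                  steps_per_epoch=len(RECORDS) // 128,
                                  epochs=10,
                                  callbacks=CALLBACKS)
    MODEL.save_weights('model.h5')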