Example #1
def main(data_dir, lib_dir, model_name, batch_size=10):
    ckpt_dir = os.path.join(os.path.dirname(__file__), 'models', model_name)
    clip, fc_filters, tconv_Fnums, tconv_dims, tconv_filters, n_filter, n_branch, \
    reg_scale = network_helper.get_parameters(ckpt_dir)

    print('defining input data')
    features, pred_init_op = import_data(data_dir=data_dir,
                                         batch_size=batch_size)

    print('making network')
    # make network
    ntwk = network_maker.CnnNetwork(features, [],
                                    utils.my_model_fn_tens,
                                    batch_size,
                                    clip=clip,
                                    fc_filters=fc_filters,
                                    tconv_Fnums=tconv_Fnums,
                                    tconv_dims=tconv_dims,
                                    n_filter=n_filter,
                                    n_branch=n_branch,
                                    reg_scale=reg_scale,
                                    tconv_filters=tconv_filters,
                                    make_folder=False)

    print('defining save file')
    save_file = os.path.join('.', lib_dir)

    # evaluate the model for each geometry in the grid file
    print('executing the model ...')
    pred_file = ntwk.predictBin3(pred_init_op,
                                 ckpt_dir=ckpt_dir,
                                 model_name=model_name,
                                 save_file=save_file)
    return pred_file
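
Example #1 exposes inference as a plain function, so a thin command-line entry point is enough to drive it. The sketch below is an assumption rather than part of the original script: the argument names simply mirror main()'s signature, and the default batch size matches the one used above.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run a saved model over a grid of geometries')
    parser.add_argument('--data-dir', required=True, help='directory holding the geometry grid file')
    parser.add_argument('--lib-dir', required=True, help='directory where the prediction file is written')
    parser.add_argument('--model-name', required=True, help='sub-folder of models/ containing the checkpoint')
    parser.add_argument('--batch-size', type=int, default=10)
    args = parser.parse_args()
    # main() is the function defined above; it returns the path of the prediction file
    pred_file = main(args.data_dir, args.lib_dir, args.model_name, batch_size=args.batch_size)
    print('prediction file written to:', pred_file)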
Example #2
def predict(flags, geo2spec, data_path, save_path):
    #Clear the default graph first for resolving potential name conflicts
    tf.reset_default_graph()
    spec2geo_flag = not geo2spec  #Derive the spec2geo flag from the geo2spec flag
    ckpt_dir = os.path.join(os.path.abspath(''), 'models', flags.model_name)
    
    clip, forward_fc_filters, tconv_Fnums, tconv_dims, tconv_filters, n_filter, n_branch, \
    reg_scale, backward_fc_filters, conv1d_filters, conv_channel_list, batch_size = network_helper.get_parameters(ckpt_dir)
    print(ckpt_dir)
    # initialize data reader
    if len(tconv_dims) == 0:
        output_size = forward_fc_filters[-1]  # no tconv stage: last FC layer sets the output size
    else:
        output_size = tconv_dims[-1]
    features, labels, train_init_op, valid_init_op = data_reader.read_data(
        input_size=flags.input_size,
        output_size=output_size - 2 * clip,
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        cross_val=flags.cross_val,
        val_fold=flags.val_fold,
        batch_size=batch_size,
        shuffle_size=flags.shuffle_size,
        data_dir=flags.data_dir,
        normalize_input=flags.normalize_input,
        test_ratio=0.2)

    #if the input is normalized
    if flags.normalize_input:
        flags.boundary = [-1, 1, -1, 1]

    #Adjust the input of geometry and spectra given the flag
    if spec2geo_flag:
        geometry = features
        spectra, pred_init_op = read_tensor_from_test_data(data_path, batch_size)
        print("You are inferring from spectra to geometry")
    else:
        geometry, pred_init_op = read_tensor_from_test_data(data_path, batch_size)
        spectra = labels
        print("You are inferring from geometry to spectra")

    # make network
    ntwk = Tandem_network_maker.TandemCnnNetwork(
        geometry, spectra, model_maker.tandem_model, batch_size,
        clip=clip, forward_fc_filters=forward_fc_filters,
        backward_fc_filters=backward_fc_filters, reg_scale=reg_scale,
        learn_rate=flags.learn_rate, tconv_Fnums=tconv_Fnums,
        tconv_dims=tconv_dims, n_branch=n_branch,
        tconv_filters=tconv_filters, n_filter=n_filter,
        decay_step=flags.decay_step, decay_rate=flags.decay_rate,
        geoboundary=flags.geoboundary,
        conv1d_filters=conv1d_filters, conv_channel_list=conv_channel_list)

    if spec2geo_flag:
        ntwk.predict_spec2geo([train_init_op, pred_init_op], ckpt_dir=ckpt_dir,
                              model_name=flags.model_name, save_file=save_path)
    else:
        ntwk.predict_geo2spec([train_init_op, pred_init_op], ckpt_dir=ckpt_dir,
                              model_name=flags.model_name, save_file=save_path)
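
A hypothetical call site for Example #2 is sketched below; the file paths are placeholders and flags is assumed to be the project's parsed flag object, with geo2spec selecting the inference direction.

# hypothetical invocation; paths are placeholders, flags comes from the project's flag reader
predict(flags, geo2spec=True,
        data_path='data/test_geometry.csv',   # geometry in, spectra out
        save_path='data')
predict(flags, geo2spec=False,
        data_path='data/test_spectra.csv',    # spectra in, geometry out
        save_path='data')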
Example #3
def evaluatemain(flags, eval_forward):
    #Clear the default graph first for resolving potential name conflicts
    tf.reset_default_graph()
    TK = time_recorder.time_keeper(time_keeping_file="data/time_keeper.txt")

    ckpt_dir = os.path.join(os.path.abspath(''), 'models', flags.model_name)
    clip, forward_fc_filters, tconv_Fnums, tconv_dims, tconv_filters, \
    n_filter, n_branch, reg_scale = network_helper.get_parameters(ckpt_dir)
    print(ckpt_dir)
    # initialize data reader
    if len(tconv_dims) == 0:
        output_size = forward_fc_filters[-1]  # no tconv stage: last FC layer sets the output size
    else:
        output_size = tconv_dims[-1]
    features, labels, train_init_op, valid_init_op = data_reader.read_data(
        input_size=flags.input_size,
        output_size=output_size - 2 * clip,
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        cross_val=flags.cross_val,
        val_fold=flags.val_fold,
        batch_size=flags.batch_size,
        shuffle_size=flags.shuffle_size,
        normalize_input=flags.normalize_input,
        data_dir=flags.data_dir,
        test_ratio=0.01)  #negative test_ratio means test from eval

    #if the input is normalized
    if flags.normalize_input:
        flags.boundary = [-1, 1, -1, 1]

    # make network
    ntwk = Backprop_network_maker.BackPropCnnNetwork(
        features,
        labels,
        model_maker.back_prop_model,
        flags.batch_size,
        clip=flags.clip,
        forward_fc_filters=flags.forward_fc_filters,
        reg_scale=flags.reg_scale,
        learn_rate=flags.learn_rate,
        tconv_Fnums=flags.tconv_Fnums,
        tconv_dims=flags.tconv_dims,
        n_branch=flags.n_branch,
        tconv_filters=flags.tconv_filters,
        n_filter=flags.n_filter,
        decay_step=flags.decay_step,
        decay_rate=flags.decay_rate,
        geoboundary=flags.boundary)

    # evaluate the results if the results do not exist or user force to re-run evaluation
    save_file = os.path.join(os.path.abspath(''), 'data',
                             'test_pred_{}.csv'.format(flags.model_name))
    if flags.force_run or (not os.path.exists(save_file)):
        print('Evaluating the model ...')
        pred_file, truth_file = ntwk.evaluate(
            valid_init_op,
            train_init_op,
            ckpt_dir=ckpt_dir,
            back_prop_epoch=flags.back_prop_epoch,
            stop_thres=flags.stop_threshold,
            verb_step=flags.verb_step,
            model_name=flags.model_name,
            write_summary=True,
            eval_forward=eval_forward,
            time_recorder=TK)
    else:
        pred_file = save_file
        truth_file = os.path.join(os.path.abspath(''), 'data',
                                  'test_truth.csv')

    mae, mse = compare_truth_pred(pred_file, truth_file)

    plt.figure(figsize=(12, 6))
    plt.hist(mse, bins=100)
    plt.xlabel('Mean Squared Error')
    plt.ylabel('cnt')
    plt.suptitle('Backprop (Avg MSE={:.4e})'.format(np.mean(mse)))
    plt.savefig(
        os.path.join(os.path.abspath(''), 'data',
                     'Backprop_{}.png'.format(flags.model_name)))
    plt.show()
    print('Backprop (Avg MSE={:.4e})'.format(np.mean(mse)))
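
compare_truth_pred() is not defined in these snippets. Below is a minimal sketch that matches how it is used here: two CSV paths in, one MAE and one MSE value per test sample out. The comma-separated, header-less CSV layout with one spectrum per row is an assumption.

import numpy as np

def compare_truth_pred(pred_file, truth_file):
    """Hypothetical helper: per-sample MAE and MSE between prediction and truth CSVs."""
    pred = np.loadtxt(pred_file, delimiter=',')    # assumed layout: one spectrum per row
    truth = np.loadtxt(truth_file, delimiter=',')
    mae = np.mean(np.abs(pred - truth), axis=1)    # one error value per sample
    mse = np.mean(np.square(pred - truth), axis=1)
    return mae, mse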
Example #4
def evaluatemain(flags, eval_forward):
    #Clear the default graph first for resolving potential name conflicts
    #Set the environment variable if this is a CPU-only script
    if flags.use_cpu_only:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    print("Start Evaluating now...")
    TK = time_recorder.time_keeper(time_keeping_file="data/time_keeper.txt")

    tf.reset_default_graph()
    ckpt_dir = os.path.join(os.path.abspath(''), 'models', flags.model_name)

    decoder_fc_filters, encoder_fc_filters, spectra_fc_filters, conv1d_filters, \
    filter_channel_list, geoboundary, latent_dim, batch_size = network_helper.get_parameters(ckpt_dir)
    batch_size = batch_size[0]  #Get rid of the list
    geometry, spectra, train_init_op, valid_init_op = data_reader.read_data(
        input_size=flags.input_size,
        output_size=300,
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        cross_val=flags.cross_val,
        val_fold=flags.val_fold,
        batch_size=flags.batch_size,
        shuffle_size=flags.shuffle_size,
        data_dir=flags.data_dir,
        normalize_input=flags.normalize_input,
        test_ratio=0.9999)
    #if the input is normalized
    if flags.normalize_input:
        flags.boundary = [-1, 1, -1, 1]
    print("Boundary read from meta_file is ", geoboundary)
    print("batch_size read from meta_file is ", batch_size)
    print("latent_dim read from meta_file is ", latent_dim)
    # make network
    ntwk = VAE_network_maker.VAENetwork(
        geometry,
        spectra,
        model_maker.VAE,
        batch_size,
        latent_dim,
        spectra_fc_filters=spectra_fc_filters,
        decoder_fc_filters=decoder_fc_filters,
        encoder_fc_filters=encoder_fc_filters,
        reg_scale=flags.reg_scale,
        learn_rate=flags.learn_rate,
        decay_step=flags.decay_step,
        decay_rate=flags.decay_rate,
        geoboundary=flags.geoboundary,
        conv1d_filters=conv1d_filters,
        filter_channel_list=filter_channel_list)

    # evaluate the results if the results do not exist or user force to re-run evaluation
    save_file = os.path.join(os.path.abspath(''), 'data',
                             'test_pred_{}.csv'.format(flags.model_name))

    if flags.force_run or (not os.path.exists(save_file)):
        print('Evaluating the model ...')
        #pred_file, truth_file = ntwk.evaluate(valid_init_op, ckpt_dir=ckpt_dir,
        Xpred_file = ntwk.evaluate(valid_init_op,
                                   train_init_op,
                                   ckpt_dir=ckpt_dir,
                                   model_name=flags.model_name,
                                   write_summary=True,
                                   eval_forward=eval_forward,
                                   time_keeper=TK)

        print("Prediction File output at:", Xpred_file)
        unpack_Xpred(Xpred_file, batch_size)
        #pred_file, truth_file = get_spectra_from_geometry(Xpred_file)
    """
Example #5
def main(flags):
    ckpt_dir = os.path.join(os.path.dirname(__file__), 'models',
                            flags.model_name)
    clip, fc_filters, tconv_Fnums, tconv_dims, tconv_filters, n_filter, n_branch, \
    reg_scale = network_helper.get_parameters(ckpt_dir)
    print(ckpt_dir)
    # initialize data reader
    if len(tconv_dims) == 0:
        output_size = fc_filters[-1]
    else:
        output_size = tconv_dims[-1]
    features, labels, train_init_op, valid_init_op = data_reader.read_data(
        input_size=flags.input_size,
        output_size=output_size - 2 * clip,
        x_range=flags.x_range,
        y_range=flags.y_range,
        cross_val=flags.cross_val,
        val_fold=flags.val_fold,
        batch_size=flags.batch_size,
        shuffle_size=flags.shuffle_size)
    # make network
    ntwk = network_maker.CnnNetwork(features,
                                    labels,
                                    utils.my_model_fn_tens,
                                    flags.batch_size,
                                    clip,
                                    fc_filters=fc_filters,
                                    tconv_Fnums=tconv_Fnums,
                                    tconv_dims=tconv_dims,
                                    n_filter=n_filter,
                                    n_branch=n_branch,
                                    reg_scale=reg_scale,
                                    tconv_filters=tconv_filters,
                                    learn_rate=flags.learn_rate,
                                    decay_step=flags.decay_step,
                                    decay_rate=flags.decay_rate,
                                    make_folder=False)

    # evaluate the results if the results do not exist or user force to re-run evaluation
    save_file = os.path.join(os.path.dirname(__file__), 'data',
                             'test_pred_{}.csv'.format(flags.model_name))
    if FORCE_RUN or (not os.path.exists(save_file)):
        print('Evaluating the model ...')
        pred_file, truth_file = ntwk.evaluate(valid_init_op,
                                              ckpt_dir=ckpt_dir,
                                              model_name=flags.model_name,
                                              write_summary=True)
    else:
        pred_file = save_file
        truth_file = os.path.join(os.path.dirname(__file__), 'data',
                                  'test_truth.csv')

    mae, mse = compare_truth_pred(pred_file, truth_file)

    plt.figure(figsize=(12, 6))
    plt.hist(mse, bins=100)
    plt.xlabel('Mean Squared Error')
    plt.ylabel('cnt')
    plt.suptitle('FC + TCONV (Avg MSE={:.4e})'.format(np.mean(mse)))
    plt.savefig(
        os.path.join(
            os.path.dirname(__file__), 'data',
            'fc_tconv_single_channel_result_cmp_{}.png'.format(
                flags.model_name)))
    plt.show()
    print('FC + TCONV (Avg MSE={:.4e})'.format(np.mean(mse)))
Example #6
def evaluatemain(flags, eval_forward, test_ratio, plot_histo=True):
    #Clear the default graph first for resolving potential name conflicts
    print("Start Evaluating now...")
    TK = time_recorder.time_keeper(time_keeping_file="data/time_keeper.txt")

    tf.reset_default_graph()

    ckpt_dir = os.path.join(os.path.abspath(''), 'models', flags.model_name)
    clip, forward_fc_filters, tconv_Fnums, tconv_dims, tconv_filters,  n_filter, n_branch, \
    reg_scale, backward_fc_filters, conv1d_filters, conv_channel_list, batch_size = network_helper.get_parameters(ckpt_dir)
    print(ckpt_dir)
    # initialize data reader
    if len(tconv_dims) == 0:
        output_size = forward_fc_filters[-1]  # no tconv stage: last FC layer sets the output size
    else:
        output_size = tconv_dims[-1]
    features, labels, train_init_op, valid_init_op = data_reader.read_data(
        input_size=flags.input_size,
        output_size=output_size - 2 * clip,
        x_range=flags.x_range,
        y_range=flags.y_range,
        geoboundary=flags.geoboundary,
        cross_val=flags.cross_val,
        val_fold=flags.val_fold,
        batch_size=batch_size,
        shuffle_size=flags.shuffle_size,
        data_dir=flags.data_dir,
        normalize_input=flags.normalize_input,
        test_ratio=test_ratio)  #Test ratio < 0 means manual pick of test set

    #if the input is normalized
    if flags.normalize_input:
        flags.boundary = [-1, 1, -1, 1]

    # make network
    ntwk = Tandem_network_maker.TandemCnnNetwork(
        features,
        labels,
        model_maker.tandem_model,
        batch_size,
        clip=clip,
        forward_fc_filters=forward_fc_filters,
        backward_fc_filters=backward_fc_filters,
        reg_scale=reg_scale,
        learn_rate=flags.learn_rate,
        tconv_Fnums=tconv_Fnums,
        tconv_dims=tconv_dims,
        n_branch=n_branch,
        tconv_filters=tconv_filters,
        n_filter=n_filter,
        decay_step=flags.decay_step,
        decay_rate=flags.decay_rate,
        geoboundary=flags.boundary,
        conv1d_filters=conv1d_filters,
        conv_channel_list=conv_channel_list)
    # evaluate the results if the results do not exist or user force to re-run evaluation
    save_file = os.path.join(os.path.abspath(''), 'data',
                             'test_pred_{}.csv'.format(flags.model_name))
    if flags.force_run or (not os.path.exists(save_file)):
        print('Evaluating the model ...')
        pred_file, truth_file = ntwk.evaluate(valid_init_op,
                                              ckpt_dir=ckpt_dir,
                                              model_name=flags.model_name,
                                              write_summary=True,
                                              eval_forward=eval_forward)
    else:
        pred_file = save_file
        truth_file = os.path.join(os.path.abspath(''), 'data',
                                  'test_truth.csv')

    TK.record(write_number=int(22000 * test_ratio))
    mae, mse = compare_truth_pred(pred_file, truth_file)

    if plot_histo:
        plt.figure(figsize=(12, 6))
        plt.hist(mse, bins=100)
        plt.xlabel('Mean Squared Error')
        plt.ylabel('cnt')
        plt.suptitle('Tandem (Avg MSE={:.4e})'.format(np.mean(mse)))
        plt.savefig(
            os.path.join(os.path.abspath(''), 'data',
                         'tandem_{}.png'.format(flags.model_name)))
        plt.show()
    print('Tandem (Avg MSE={:.4e})'.format(np.mean(mse)))
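
All six examples begin by calling network_helper.get_parameters(ckpt_dir) to recover the hyperparameters saved when the model was trained; note that the length and order of the returned tuple differ per model family. The helper itself is not shown, so the following is only a sketch under the assumption that training wrote one Python literal per line to a 'parameters.txt' file inside the checkpoint folder (both the file name and layout are hypothetical).

import ast
import os

def get_parameters(ckpt_dir, filename='parameters.txt'):
    """Hypothetical sketch: reload hyperparameters stored next to the checkpoint.

    Assumes one Python literal per non-empty line, written in a fixed order at
    training time, so callers can unpack the returned tuple positionally.
    """
    params = []
    with open(os.path.join(ckpt_dir, filename)) as f:
        for line in f:
            line = line.strip()
            if line:                              # skip blank lines
                params.append(ast.literal_eval(line))
    return tuple(params)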