import json
import os

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

# gen_file_lst, extract_model_type, read_data, create_dataset, reformat_model_params,
# gen_estimator and plot_learning_curve are project-local helpers defined elsewhere
# in the original module.

def main(args):
    
    # Gather all training reports and build a dictionary describing each report file.
    train_reports = gen_file_lst(args.raw_results_dir)
    train_report_detail = extract_model_type(train_reports)
    
    with open(args.haralick_txt_params, 'r') as fb:
        haralick_params = json.load(fb)

    trn_image_dict = read_data(args.train_data_dir)
    tst_image_dict = read_data(args.test_data_dir)
    
    # Iterate over the reports for analysis.
    for data_combos in train_report_detail:
        # Every report is analysed as an SGD-trained SVM, overriding the detected type.
        data_combos['model_type'] = 'svm_sgd'
        # Build the training and test feature arrays.
        X_train, y_train = create_dataset(trn_image_dict, haralick_params, args.text_dir, data_combos['model_type'])
        X_test, y_test = create_dataset(tst_image_dict, haralick_params, args.text_dir, data_combos['model_type'])
            
        scaling = MinMaxScaler(feature_range=(0, 1)).fit(X_train)
        X_train = scaling.transform(X_train)
        X_test = scaling.transform(X_test)
 
        # Load the saved cross-validation results into a DataFrame.
        tmp_arr_dict = np.load(data_combos['path'], allow_pickle=True)
        tmp_arr_df = tmp_arr_dict.item().get('cv_results_')
        tmp_arr_df = pd.DataFrame.from_dict(tmp_arr_df)

        # Keep the five best-ranked parameter sets.
        tmp_arr_df.sort_values('rank_test_score', ascending=True, inplace=True)
        trl_arr_df_params_lst = tmp_arr_df['params'][:5].tolist()
        # Reformat the parameter dictionaries unless the model is the SGD SVM,
        # whose parameters are already in the expected shape.
        if data_combos['model_type'] != 'svm_sgd':
            model_params_reformat = reformat_model_params(trl_arr_df_params_lst)
        else:
            model_params_reformat = trl_arr_df_params_lst
        # Carry the top five performers forward and plot their learning curves.
        for vals in model_params_reformat:
            # Build a descriptive title for the model-performance plot.
            title2 = '_'.join(['_'.join((k, str(v))) for k, v in vals.items()])
            title1 = '_'.join([v for k, v in data_combos.items() if k != 'path'])
            title = title1 + '_' + title2
            
            tmp_estimator = gen_estimator(data_combos['model_type'], vals)

            tmp_fig = plot_learning_curve(tmp_estimator, title, X_train, y_train,
                                          cv=3, n_jobs=-1)
            # Save the figure.
            dst_dir_f = os.path.join(args.dest_dir, title + '.jpeg')
            tmp_fig.savefig(dst_dir_f)
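
plot_learning_curve is not defined in this excerpt. A minimal sketch modelled on the learning-curve recipe in the scikit-learn documentation (an assumption; the project's actual helper may style the plot differently) could be:

import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import learning_curve

def plot_learning_curve(estimator, title, X, y, cv=None, n_jobs=None):
    """Plot mean train/CV scores against training-set size and return the figure."""
    fig, ax = plt.subplots()
    ax.set_title(title)
    ax.set_xlabel("Training examples")
    ax.set_ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs)
    ax.plot(train_sizes, np.mean(train_scores, axis=1), 'o-', label="Training score")
    ax.plot(train_sizes, np.mean(test_scores, axis=1), 'o-', label="Cross-validation score")
    ax.legend(loc="best")
    return fig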
Example #2
from flask import render_template, request
# load_model is assumed to be Keras's keras.models.load_model (the saved models
# are .h5 files); train and pred are the project's own modules.

def predict():
    file = request.values['file']
    select = request.form.get('comp_select')

    dataset = train.read_data(file)
    training_set, test_set = train.train_test_split(dataset)
    train.sc.fit(training_set)

    test_inputs = pred.test_inputs(60, dataset, test_set)
    test_inputs = train.sc.transform(test_inputs)
    X_test = pred.test_prep(60, test_inputs, test_set)  # (251, 60, 1)

    model = load_model('models/' + str(select))

    predicted_stock_price = model.predict(X_test)
    predicted_stock_price = train.sc.inverse_transform(predicted_stock_price)

    rmse = pred.return_rmse(test_set, predicted_stock_price)
    pred.plot_predictions(test_set,
                          predicted_stock_price,
                          nameURL='static/' + str(select).replace('.h5', "") +
                          '.png')
    # The image URL passed on to predict.html; it must live in a 'static' folder.
    imgname = 'static/' + str(select).replace('.h5', '.png')
    return render_template('predict.html',
                           model=select,
                           rmse=rmse,
                           imgname=imgname)
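
pred.test_inputs and pred.test_prep are project-local helpers that are not shown. A plausible sketch of the 60-step sliding-window preparation they imply (hypothetical; the real module may differ) is:

import numpy as np

def test_inputs(window, dataset, test_set):
    # Take the last len(test_set) + window values so that every test point
    # has a full look-back window, shaped as a single feature column.
    inputs = np.asarray(dataset)[len(dataset) - len(test_set) - window:]
    return inputs.reshape(-1, 1)

def test_prep(window, inputs, test_set):
    # Stack one window of scaled history per test point: shape (N, window, 1).
    X = np.array([inputs[i - window:i, 0]
                  for i in range(window, window + len(test_set))])
    return X.reshape(X.shape[0], X.shape[1], 1)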
Example #3
def main(args):

    img_dict = read_data(args.input_dir)

    img_avrg_dim = det_avrg_img_size(img_dict)

    img_resize_dict = resize_imgs(img_dict, img_avrg_dim)

    write_output_img(img_resize_dict, args)
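
det_avrg_img_size and resize_imgs are not shown. A minimal sketch using OpenCV (an assumed dependency; the project may use another imaging library) could look like:

import cv2
import numpy as np

def det_avrg_img_size(img_dict):
    # Average (height, width) over all loaded images.
    dims = np.array([img.shape[:2] for img in img_dict.values()])
    return tuple(int(x) for x in dims.mean(axis=0))

def resize_imgs(img_dict, avrg_dim):
    # cv2.resize expects the target size as (width, height).
    h, w = avrg_dim
    return {name: cv2.resize(img, (w, h)) for name, img in img_dict.items()}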
Example #4
    def prepare_date(self):
        from train import read_data

        testfile = os.path.join(os.path.abspath(self.config.data_paths),
                                'test.txt')
        self.test_token, self.test_tags = read_data(testfile)

        self.data_helper = DataHelper(None, None, None, isTrain=False)
        self.data_helper.load_data_from_file(self.config.checkpoint_path)
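
In this example read_data returns parallel token and tag sequences. A sketch of a loader for a CoNLL-style "token tag" file (the format is an assumption drawn from the test_token/test_tags names) might be:

def read_data(path):
    tokens, tags = [], []
    sent_toks, sent_tags = [], []
    with open(path, encoding='utf-8') as fh:
        for line in fh:
            line = line.strip()
            if not line:  # a blank line ends a sentence
                if sent_toks:
                    tokens.append(sent_toks)
                    tags.append(sent_tags)
                    sent_toks, sent_tags = [], []
                continue
            tok, tag = line.split()[:2]
            sent_toks.append(tok)
            sent_tags.append(tag)
    if sent_toks:  # flush the final sentence
        tokens.append(sent_toks)
        tags.append(sent_tags)
    return tokens, tags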
Example #5
from model import Graph
from train import read_data, extract_entity

load_model_path = 'output/subject_model.weights'
train_data, dev_data, test_data, id2class, class2id = read_data()
_, test_model = Graph(0, 0, 0, 0)
test_model.load_weights(load_model_path)


def predict(content, cls):
    return extract_entity(content, cls, class2id, test_model)


if __name__ == '__main__':
    while True:
        content = input('content: ')
        cls = input('cls: ')
        res = predict(content, cls)
        print(res)
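
Here read_data takes no arguments and returns the data splits plus the id2class/class2id label maps. A sketch of the class-map construction those return values imply (hypothetical; the real loader reads the project's own data files) could be:

def build_class_maps(labels):
    # Map each distinct class name to a stable integer id, and back.
    id2class = {i: c for i, c in enumerate(sorted(set(labels)))}
    class2id = {c: i for i, c in id2class.items()}
    return id2class, class2id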
Example #6
    # (Fragment: the enclosing function creates `fig`, e.g. via plt.figure().)
    ax = fig.add_subplot(111)
    
    attention = attention.squeeze(1).cpu().detach().numpy()
    
    cax = ax.matshow(attention, cmap='bone')
    ax.tick_params(labelsize=15)
    ax.set_xticklabels([''] + ['<sos>'] + [t for t in _tokenizer(article)] + ['<eos>'],
                       rotation=45)
    ax.set_yticklabels([''] + title)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()


ARTICLE, TITLE, train, test = read_data()

INPUT_DIM = len(ARTICLE.vocab)
OUTPUT_DIM = len(TITLE.vocab)

ENC_EMB_DIM = 512
DEC_EMB_DIM = 512
ENC_HID_DIM = 256
DEC_HID_DIM = 256
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
PAD_IDX = ARTICLE.vocab.stoi['<pad>']
SOS_IDX = TITLE.vocab.stoi['<sos>']
EOS_IDX = TITLE.vocab.stoi['<eos>']
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
encoder = models.Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
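
The Attention module instantiated above is not defined in this excerpt. A standard additive (Bahdanau-style) attention layer in PyTorch, as used in many seq2seq tutorials (an assumption about this project's exact version), looks like:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Attention(nn.Module):
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        # The encoder is assumed to be bidirectional, hence enc_hid_dim * 2.
        self.attn = nn.Linear(enc_hid_dim * 2 + dec_hid_dim, dec_hid_dim)
        self.v = nn.Linear(dec_hid_dim, 1, bias=False)

    def forward(self, hidden, encoder_outputs):
        # hidden: [batch, dec_hid_dim]; encoder_outputs: [src_len, batch, enc_hid_dim * 2]
        src_len = encoder_outputs.shape[0]
        hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
        attention = self.v(energy).squeeze(2)
        return F.softmax(attention, dim=1)  # one weight per source position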
Example #7
    plt.xlabel('Time')
    plt.ylabel('IBM Stock Price')
    plt.legend()
    plt.show()


def return_rmse(test, predicted):
    rmse = math.sqrt(mean_squared_error(test, predicted))
    print("The root mean squared error is {}.".format(rmse))
    # Return the value as well: Example #2 above uses it in the rendered template.
    return rmse


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Do something.")
    # Optional arguments are identified by the "-" prefix; the remaining
    # arguments are assumed to be positional.
    parser.add_argument("--path", "-file_path")
    parser.add_argument("--model", "-model")
    args = parser.parse_args(sys.argv[1:])
    dataset = train.read_data(args.path)
    training_set, test_set = train.train_test_split(dataset)
    train.sc.fit(training_set)

    test_inputs = test_inputs(60, dataset)
    test_inputs = train.sc.transform(test_inputs)
    X_test = test_prep(60, test_inputs)  # (251, 60, 1)

    model = load_model(args.model)
    predicted_stock_price = model.predict(X_test)
    predicted_stock_price = train.sc.inverse_transform(predicted_stock_price)
    return_rmse(test_set, predicted_stock_price)
    plot_predictions(test_set, predicted_stock_price)
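
plot_predictions is called both with and without the nameURL keyword across these examples. A minimal sketch consistent with both call sites and with the axis labels in the fragment above (an assumed implementation) could be:

import matplotlib.pyplot as plt

def plot_predictions(test, predicted, nameURL=None):
    plt.plot(test, color='red', label='Real IBM Stock Price')
    plt.plot(predicted, color='blue', label='Predicted IBM Stock Price')
    plt.title('IBM Stock Price Prediction')
    plt.xlabel('Time')
    plt.ylabel('IBM Stock Price')
    plt.legend()
    if nameURL:
        plt.savefig(nameURL)  # save before show(), which clears the figure
    plt.show()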
Example #8
encode_features = False
graphite_relu = True
z_agg = 'mean'
dropout = 0.
load_path = None

num_gen_samples = 100
num_gen_conditions = 10
num_images_per_condition = 1
final_num_gen_samples = 1000
final_num_gen_conditions = 1000
final_num_images_per_condition = 10
evals_to_stop = 100
eval_only = False

preprocessed_data, xa_mappings = read_data(dataset, num_train_examples, sample_train_randomly, identity_features,
                                           num_bond_types, num_atom_types, num_max_nodes)

for lr, gcn_layer, autoregressive, gcn_relu, gcn_batch_norm, graphite_layer, z_dim, use_pos_weight in \
    product(v_lrs, v_gcn_layers, v_autoregressives, v_gcn_relus, v_gcn_batch_norms, v_graphite_layers, v_z_dims, v_use_pos_weights):
    gcn_hiddens = []
    gcn_aggs = []
    gcn_relus = []
    for l in range(gcn_layer):
        gcn_hiddens.append(z_dim)
        gcn_aggs.append('mean')
        gcn_relus.append(gcn_relu == 'all' or (gcn_relu == 'last' and l == gcn_layer - 1))

    train(dataset, num_train_examples, sample_train_randomly, identity_features, num_bond_types, num_atom_types,
          num_max_nodes, model_type, lr, epochs, autoregressive, encode_features, gcn_batch_norm, gcn_hiddens,
          gcn_aggs, gcn_relus, graphite_relu, graphite_layer, z_dim, z_agg, dropout, num_gen_samples,
          num_gen_conditions, evals_to_stop, eval_only, load_path, use_pos_weight, preprocessed_data, xa_mappings,