Code Example #1
import pandas as pd


def Datatest():
    """
    Read the test dataset, compute features for every essay set,
    and write one CSV file per set via store_csv().
    """
    test_essay = pd.read_csv(
        "/mnt/1f2870f0-1578-4534-b33f-0817be64aade/projects/Hackerearth/incedo_nlpcadad7d/incedo_participant/test_dataset.csv"
    )
    essay_sets = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
    for c, essay_set in enumerate(essay_sets, start=1):
        set_filter = (test_essay.Essayset == essay_set)
        essay_text = test_essay[set_filter]['EssayText']
        # IDs for this set (built here but never passed to store_csv)
        id_list = ['ID']
        id_list.extend(list(test_essay[set_filter]['ID']))
        # surface statistics and readability metrics
        average_word_length, no_of_word, no_of_sentence = no_of_words(essay_text)
        processed_essay = preprocessdata(essay_text)
        (flesch_score, gunning_index, kincaid_grade, liau_index,
         automated_readability_index, dale_readability_score,
         difficult_word, linsear_write) = seven_test(processed_essay)
        count_misspell = Spelling_mistake(processed_essay)
        Flesch_score_list = calculate_Flesch_Score(essay_text)
        count_clause_word = Clauseword(processed_essay)
        # part-of-speech based counts
        list_of_pos_tag = PosTagging(processed_essay)
        count_of_NN = NN_Extraction(list_of_pos_tag)
        count_of_NNP = NNP_Extraction(list_of_pos_tag)
        count_of_verb = VERB_Extraction(list_of_pos_tag)
        count_of_adverb = ADVERB_Extraction(list_of_pos_tag)
        count_of_adjective = ADJECTIVE_Extraction(list_of_pos_tag)
        #count_of_determiners = DETERMINERS_Extraction(list_of_pos_tag)
        clarity = clarity_list(test_essay[set_filter])
        coherant = coherant_list(test_essay[set_filter])
        #tfidf_score = calculate_tfidf(test_essay, essay_set)
        # assemble one feature list per column and write one CSV per set
        complete_data = [
            count_of_NN, count_of_NNP, count_of_verb, count_of_adverb,
            flesch_score, count_of_adjective, count_misspell, clarity,
            coherant, Flesch_score_list, count_clause_word, gunning_index,
            dale_readability_score, linsear_write, average_word_length,
            no_of_word, no_of_sentence
        ]
        store_csv(complete_data, c)
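The eight values unpacked from seven_test match, one for one, the readability metrics exposed by the textstat package. The project's actual helper is not shown; this is a minimal sketch assuming textstat as the backend, returning one list of scores per metric in the same order as the unpacking above:

# Hypothetical seven_test built on textstat; the project's real helper is not shown.
import textstat

def seven_test(essays):
    """Compute eight readability metrics for each essay string."""
    scores = [[
        textstat.flesch_reading_ease(e),
        textstat.gunning_fog(e),
        textstat.flesch_kincaid_grade(e),
        textstat.coleman_liau_index(e),
        textstat.automated_readability_index(e),
        textstat.dale_chall_readability_score(e),
        textstat.difficult_words(e),
        textstat.linsear_write_formula(e),
    ] for e in essays]
    # transpose: one list per metric, matching the caller's unpacking order
    return [list(metric) for metric in zip(*scores)]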
Code Example #2
    # figure output path
    fig_path = r'./out_put/figure'
    mkdir(fig_path)

    # build the single-component FCN
    from fcn_test import myFcn
    myfcn = myFcn(component=1)
    model = myfcn.get_fcn()
    # result file path
    label_predpath = r'../data/predict/conv_pre/test'
    mkdir(label_predpath)
    result_load_path = r'./out_put'
    mkdir(result_load_path)
    # the number of training and test samples
    trainnum, testnum = 0, 300
    # the length of one trace in samples, and the number of data components
    img_cols, component = 4992, 1
    # load data
    data, label = preprocessdata(data_load_path, trainnum, testnum, img_cols,
                                 component, 'predict')
    # load the trained model (this replaces the network built above)
    model = loadModel(model_load_path)
    # predict labels
    label_pred = model.predict(data, batch_size=32, verbose=0)
    savemat(data_load_path, testnum, data)
    # save result
    save_result(result_load_path, label_pred, parameter1, parameter2)
    save_label_pred(result_load_path, data, label_pred, label_predpath)
    # show label
    show_label(label, label_pred, [], fig_path)
    show_data_and_label(data, label_pred, [], fig_path)
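The mkdir and loadModel helpers used above are project-local and not shown. A minimal sketch, assuming mkdir merely creates the directory tree and loadModel wraps Keras model loading:

# Hypothetical helpers; the project's actual implementations are not shown.
import os
from keras.models import load_model

def mkdir(path):
    """Create the directory (and parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)

def loadModel(model_load_path):
    """Load a trained Keras model from disk."""
    return load_model(model_load_path)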
Code Example #3
# project-local helpers (mkdir, preprocessdata, LossHistory) are assumed to be imported
from keras.callbacks import ModelCheckpoint
from fcn_test import myFcn

if __name__ == '__main__':
    output_path = r'./test'
    mkdir(output_path)
    filepath = [r'./data/train']
    trainnum, testnum = 750, 250
    img_cols = 4992   # the length of one trace in samples
    component = 1     # the number of components of the data
    save_img_path = output_path + r'/train_curves.png'
    log_path = output_path + r'/Log.txt'
    # ---------------
    batch_size = 32
    nb_epoch = 500
    # you still need to modify the data in GaussDirtribution_2D of the preprocess module
    # ----------------
    # 2 Load data
    train_data, train_label, test_data, test_label = preprocessdata(
        filepath, trainnum, testnum, img_cols, component)
    # 3 Load model
    #myunet = myUnet(img_cols=img_cols, component=component)
    #model = myunet.get_une()
    myfcn = myFcn(img_cols=img_cols, component=component)
    model = myfcn.get_fcn()
    # 4 Train model
    print('Fitting model...')
    model_checkpoint = ModelCheckpoint(output_path + '/unet.hdf5', monitor='loss',
                                       verbose=1, save_best_only=True)
    history = LossHistory()
    try:
        hist = model.fit(train_data, train_label, batch_size=batch_size,
                         epochs=nb_epoch, verbose=2,
                         validation_data=(test_data, test_label),
                         callbacks=[model_checkpoint, history])
    finally:
        # 5 Print the training result
        history.loss_plot('epoch', save_img_path)
        # 6 save the result
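LossHistory is a project-local Keras callback whose definition is not shown. A minimal sketch of the usual pattern, assuming it records losses per epoch and exposes the loss_plot method called in the finally block:

# Hypothetical LossHistory callback matching the usage above.
import matplotlib.pyplot as plt
from keras.callbacks import Callback

class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses, self.val_losses = [], []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))

    def loss_plot(self, loss_type, save_img_path):
        """Plot the recorded training/validation loss and save the figure."""
        epochs = range(len(self.losses))
        plt.plot(epochs, self.losses, label='train loss')
        plt.plot(epochs, self.val_losses, label='val loss')
        plt.xlabel(loss_type)
        plt.ylabel('loss')
        plt.legend()
        plt.savefig(save_img_path)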