Example #1
def run():
    '''Wrapper function.'''
    # parse command line arguments
    arg_parser = build_arg_parse()
    args = arg_parser.parse_args()
    # get output file path
    output_loc = args.output
    # read in compound noun list
    comp_nouns = tools.read_from_file(args.input)
    if comp_nouns:
        # show progress bar
        bar = None
        if args.verbose:
            print('Segmented Compound Noun Parser Version 1.0')
            bar = Bar('Parse nouns\t', max=len(comp_nouns))
        (results, bar) = parallel_parse_helper(comp_nouns, args.max_workers,
                                               bar, args.verbose)
        if args.verbose:
            bar.finish()
        # write results to file
        if args.verbose:
            print('Writing parse results to {}'.format(output_loc))
        if results:
            tools.write_results(results, output_loc)
            # write dot file
            if args.dot:
                dot_write.write(results, args.dot_dir, args.verbose)
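
The helper build_arg_parse() is not shown in this example; below is a minimal sketch of an argparse-based parser exposing the attributes the snippet reads (input, output, verbose, max_workers, dot, dot_dir). The actual option names and defaults in the original project may differ.

import argparse

def build_arg_parse():
    parser = argparse.ArgumentParser(description='Segmented compound noun parser')
    parser.add_argument('-i', '--input', required=True,
                        help='file containing the compound noun list')
    parser.add_argument('-o', '--output', required=True,
                        help='output file path for parse results')
    parser.add_argument('-w', '--max-workers', type=int, default=4,
                        help='number of parallel workers')
    parser.add_argument('--dot', action='store_true',
                        help='also write DOT files for the parses')
    parser.add_argument('--dot-dir', default='.',
                        help='directory for DOT output')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='print progress information')
    return parser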
Example #2
    def evaluate(self, args, split):

        logging.info("Start evaluation")
        with tf.Session(config=tools.get_config_proto(args.gpu_memory_fraction)) as sess:

            assert type(self.saver) == tf.train.Saver, 'Saver is not correctly initialized'
            # Initialization:
            self.initialize(sess, args)
            # Process all data:
            logging.info('Computing metrics on ' + split + ' data')
            initime = time.time()
            sess.run(self.reader.get_init_op(split))
            nbatches = self.reader.get_nbatches_per_epoch(split)
            step = 0
            all_predictions = []
            all_labels = []
            all_names = []
            while True:
                try:
                    predictions, labels, names, images = sess.run(
                        [self.predictions, self.labels, self.filenames, self.inputs],
                        {self.is_training: False})
                    all_predictions.extend(predictions)
                    all_labels.extend(labels)
                    all_names.extend(names)

                    if args.save_input_images:
                        tools.save_input_images(names, images, args, 1, step, self.reader.img_extension)
                    step += 1

                except tf.errors.OutOfRangeError:
                    break

                if step % args.nsteps_display == 0:
                    print('Step %i / %i' % (step, nbatches))

            metrics = accuracy.compute_accuracy(all_predictions, all_labels)
            fintime = time.time()
            logging.debug('Done in %.2f s' % (fintime - initime))
            logging.info(split + ' accuracy: %.2f' % metrics)

            # Write results:
            tools.write_results(all_predictions, all_labels, all_names, self.classnames, args)

        return metrics
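
tools.get_config_proto() is defined elsewhere in that project; a plausible minimal version using the TensorFlow 1.x API is sketched below (the real helper may set additional session options).

import tensorflow as tf

def get_config_proto(gpu_memory_fraction):
    # limit per-process GPU memory and allow the allocation to grow as needed
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction,
        allow_growth=True)
    return tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)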
Example #3
    confusion_matrix.reset()

    with torch.no_grad():
        model.eval()
        mydataset_val = DataLoader(val_dataset, batch_size=14, shuffle=True)
        val_losses = []

        for i, batch in enumerate(tqdm(mydataset_val)):
            img_batch, lbl_batch = batch
            img_batch, lbl_batch = tools.to_cuda(img_batch), tools.to_cuda(
                lbl_batch)

            output, rec = model(img_batch.float())
            loss1 = criterion1(output, lbl_batch.long())
            loss2 = criterion2(rec, img_batch.float())
            loss = tools.to_cuda(0.9 * loss1 + 0.1 * loss2)
            val_losses.append(loss.item())
            output_conf, target_conf = tools.conf_m(output, lbl_batch)
            confusion_matrix.add(output_conf, target_conf)

        print(confusion_matrix.conf)
        testAccuracy = (np.trace(confusion_matrix.conf) /
                        float(np.ndarray.sum(confusion_matrix.conf))) * 100
        print('VAL_LOSS: ', '%.3f' % np.mean(val_losses), 'VAL_ACC: ',
              '%.3f' % testAccuracy)
        tools.write_results(ff, save_folder, epoch, train_acc, testAccuracy,
                            np.mean(train_losses), np.mean(val_losses))
        if epoch % 5 == 0:
            torch.save(model.state_dict(),
                       './' + save_folder + '/model_{}.pt'.format(epoch))
                             100. * cnt / len(val_IDSv), loss.item()))
            cnt = cnt + 1
            ######################################################################

            del (inputs, targets, retargets, loss)

    print(confusion_matrix.conf)
    val_Loss = np.mean(val_losses)
    v_Loss.append(val_Loss)
    val_acc = (np.trace(confusion_matrix.conf) /
               float(np.ndarray.sum(confusion_matrix.conf))) * 100
    v_acc.append(val_acc)
    print('VAL_OA', '%.3f' % val_acc)
    print('Train_Loss: ', np.mean(train_Loss))
    print('Val_Loss: ', np.mean(val_Loss))
    tools.write_results(train_Loss, val_Loss, train_acc, val_acc, epoch)
    torch.save(model.state_dict(), './models/model_{}.pt'.format(epoch))
    confusion_matrix.reset()

plt.figure(1)
plt.plot(t_Loss)
plt.plot(v_Loss)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'val loss'], loc='upper left')
plt.savefig("plots/Myfile.png", format="png")

plt.figure(2)
plt.plot(t_acc)
plt.plot(v_acc)
                     center[0]-templatesize:center[0]+templatesize,:]
    
    y = center[1]-searchingsize
    x = center[0]-searchingsize

    
    #read a new frame
    Frame = imread(folder + filenamestart + str(i) + '.png')
    SearchingZone = Frame[center[1]-searchingsize:center[1]+searchingsize,
                          center[0]-searchingsize:center[0]+searchingsize,:]
    
    #processing
    template = tls.ImgCannyGaussian(template)
    SearchingZone = tls.ImgCannyGaussian(SearchingZone)
    
    try: #We may be drifting out of the frame here, in this case we can't update our center
        #fit template on next frame
        ij = tls.Match(SearchingZone, template, index, ij)#, pad_input = True) 
    
        center =(ij[0]+x+templatesize,ij[1]+y+templatesize,0)
        listCenters.append(("{0}{1}.png".format(filenamestart, i),center[0],center[1]))
    except ValueError:
        print("You seem to be out of bonds")
        listCenters.append(("{0}{1}.png".format(filenamestart, i),"null","null"))

    
    print(center)


tls.write_results(listCenters, "results{0}.txt".format(Set))
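
tls.write_results() is not shown in this tracking script; a minimal sketch that fits the call above, writing one "filename x y" line per frame, could look like this (the original helper may use another format, e.g. CSV).

def write_results(centers, path):
    # centers is a list of (filename, x, y) tuples; the coordinates may be the
    # string "null" when the template drifted out of the frame
    with open(path, 'w') as out:
        for name, x, y in centers:
            out.write('{} {} {}\n'.format(name, x, y))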
# Get predicted classes
model_rbe = learn(data_rbe, splitted_rbe[0], splitted_rbe[1],
                  splitted_rbe[2], splitted_rbe[3], ['RBE'])
model_alpha = learn(data_alpha, splitted_alpha[0], splitted_alpha[1],
                    splitted_alpha[2], splitted_alpha[3], ['alpha_l'])
model_beta = learn(data_beta, splitted_beta[0], splitted_beta[1],
                   splitted_beta[2], splitted_beta[3], ['beta_l'])

# CatBoost predictions
preds_rbe = model_rbe.predict(splitted_rbe[2])
preds_alpha = model_alpha.predict(splitted_alpha[2])
preds_beta = model_beta.predict(splitted_beta[2])

a = tools.scores(splitted_rbe[3], preds_rbe, 'catboost_rbe')
b = tools.scores(splitted_alpha[3], preds_alpha, 'catboost_alpha')
c = tools.scores(splitted_beta[3], preds_beta, 'catboost_beta')
catboost_results = [a[0], a[1], b[0], b[1], c[0], c[1]]
tools.write_results(catboost_results, 'Catboost')

# save catboost models
model_alpha.save_model('./models/catboost_alpha.sav')
model_beta.save_model('./models/catboost_beta.sav')
model_rbe.save_model('./models/catboost_rbe.sav')

# importances
def importances(data, model,text):
    importance = model.get_feature_importance()
    fig, axs = plt.subplots(1, 1, figsize=(9, 9), sharey=True)
    names = list(data)
    axs.bar(names, importance)
    fig.suptitle('importance_'+text+'_plot')
    plt.savefig('./plots/importance_'+text+'.png')
    plt.clf()


def svm_regression(a, name, C, e):
    svr = SVR(gamma='scale', C=C, epsilon=e)
    model = svr.fit(a[0], a[1])
    joblib.dump(model, './models/' + name + '_model.sav')
    preds = model.predict(a[2])
    return tools.scores(a[3], preds, name)


# get the data from csv file
data = pd.read_csv('./data/pide.csv',
                   usecols=range(3, 16)).drop(['alpha_x', 'beta_x'], axis=1)
data_rbe = pd.read_csv('./data/data_rbe.csv')

dump_encoder(data_rbe, 'RBE')
dump_encoder(data, 'quadratic')

rbe = tools.preprocess_data(data_rbe, ['RBE'])
quadratic_alpha = tools.preprocess_data(data.drop(['beta_l'], axis=1),
                                        ['alpha_l'])
quadratic_beta = tools.preprocess_data(data.drop(['alpha_l'], axis=1),
                                       ['beta_l'])

svm_rbe_results = svm_regression(rbe, 'svr_rbe', 100000, 0.06)
svm_alpha_results = svm_regression(quadratic_alpha, 'svr_alpha', 100000, 0.06)
svm_beta_results = svm_regression(quadratic_beta, 'svr_beta', 100000, 0.06)
# # 100000
svr_results = [
    svm_rbe_results[0], svm_rbe_results[1], svm_alpha_results[0],
    svm_alpha_results[1], svm_beta_results[0], svm_beta_results[1]
]
tools.write_results(svr_results, 'SVR')
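
tools.scores() is only used here through indexing its return value ([0] and [1]); a minimal sketch consistent with that usage, reporting two standard regression metrics, is given below (which metrics the original project actually computes is an assumption).

from sklearn.metrics import mean_squared_error, r2_score

def scores(y_true, y_pred, name):
    # report and return two regression metrics for the named model
    mse = mean_squared_error(y_true, y_pred)
    r2 = r2_score(y_true, y_pred)
    print('{}: MSE={:.4f}, R2={:.4f}'.format(name, mse, r2))
    return mse, r2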
Example #8
        for i, batch in enumerate(tqdm(mydataset_val)):
            img_batch, lbl_batch = batch
            img_batch, lbl_batch = tools.to_cuda(
                img_batch.permute(1, 0, 2, 3, 4)), tools.to_cuda(lbl_batch)

            output = model(img_batch.float())
            loss = criterion(output, lbl_batch.long())
            val_losses.append(loss.item())
            output_conf, target_conf = tools.conf_m(output, lbl_batch)
            confusion_matrix.add(output_conf, target_conf)

        print(confusion_matrix.conf)
        test_acc = (np.trace(confusion_matrix.conf) /
                    float(np.ndarray.sum(confusion_matrix.conf))) * 100
        change_acc = confusion_matrix.conf[1, 1] / float(
            confusion_matrix.conf[1, 0] + confusion_matrix.conf[1, 1]) * 100
        non_ch = confusion_matrix.conf[0, 0] / float(
            confusion_matrix.conf[0, 0] + confusion_matrix.conf[0, 1]) * 100
        print('VAL_LOSS: ', '%.3f' % np.mean(val_losses), 'VAL_ACC:  ',
              '%.3f' % test_acc, 'Non_ch_Acc: ', '%.3f' % non_ch,
              'Change_Accuracy: ', '%.3f' % change_acc)
        confusion_matrix.reset()

    tools.write_results(ff, save_folder, epoch, train_acc, test_acc,
                        change_acc, non_ch, np.mean(train_losses),
                        np.mean(val_losses))
    if epoch % 5 == 0:  # save model every 5 epochs
        torch.save(model.state_dict(),
                   './' + save_folder + '/model_{}.pt'.format(epoch))
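
tools.conf_m() is not shown; it plausibly converts network outputs and labels into flat class-index tensors that the confusion matrix can consume. A minimal sketch under that assumption (the original helper may differ):

import torch

def conf_m(output, target):
    # take the arg-max class per sample and flatten both tensors for accumulation
    preds = torch.argmax(output, dim=1)
    return preds.reshape(-1).cpu(), target.reshape(-1).cpu()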
Example #9
        repeat = 10
        dfs_rt_results.append(run_time_anyalsis(dfs_topological_sort, 
            graph, number, repeat))
        sr_rt_results.append(run_time_anyalsis(source_removal_topological_sort, 
            graph, number, repeat))

    # comment out if you want to create the graph without the final point
    # plot_name = '''Compare topological sort: dfs(red) vs source removal(blue):
# min of {0} runs'''.format(repeat)
    # dfs_plot_type = 'ro'
    # rt_plot_type = 'bo'
    # dfs_package = ('depth-first-search',  edge_counts, dfs_rt_results, dfs_plot_type)
    # rt_package = ('source-removal',  edge_counts, sr_rt_results, rt_plot_type)
    # line_plot_compare((dfs_package, rt_package), plot_name, 'edges', 'seconds')

    # it seems reasonable to also plot without the final, larger point
    plot_name = '''without final points topological sort: dfs(red) vs source removal(blue):
min of {0} runs'''.format(repeat)
    dfs_plot_type = 'ro'
    rt_plot_type = 'bo'
    dfs_package_small = ('depth-first-search',  edge_counts[0:-1], dfs_rt_results[0:-1], dfs_plot_type)
    rt_package_small = ('source-removal',  edge_counts[0: -1], sr_rt_results[0:-1], rt_plot_type)
    line_plot_compare((dfs_package_small, rt_package_small), plot_name, 'edges', 'seconds')


    write_results(edge_counts, dfs_rt_results, sr_rt_results, 'runtimes.txt')
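
run_time_anyalsis() (name kept as in the source) is defined elsewhere in that project; a minimal sketch of what the plot titles suggest it measures, the minimum of `repeat` timings of `number` calls, assuming timeit:

import timeit

def run_time_anyalsis(sort_func, graph, number, repeat):
    # best (minimum) wall-clock time over `repeat` trials of `number` calls each
    timings = timeit.repeat(lambda: sort_func(graph), number=number, repeat=repeat)
    return min(timings)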