Example No. 1
def worker():
	global queue
	while True:
		try:
			item = queue.get()
			print 'Processing ' + item['filename']
			if item['type'] == 'pcap':
				parse_pcap.parse_pcap(get_data_path()+item['filename'])
			elif item['type'] == 'log':
				parse_log.parse_log(get_data_path()+item['filename'])
		except Exception, e:
			print e
		finally:
			queue.task_done()  # assumed completion of the truncated block: mark the item as handled
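
A minimal sketch of how such a worker is typically started, assuming it lives in a module where queue is a standard Python 2 Queue.Queue and where the file names below are placeholders:

import threading
import Queue  # Python 2 module name; it is `queue` on Python 3

queue = Queue.Queue()

# Start a small pool of daemon threads that drain the queue.
for _ in range(4):
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()

# Enqueue items in the shape the worker expects.
queue.put({'type': 'pcap', 'filename': 'capture01.pcap'})
queue.put({'type': 'log', 'filename': 'system.log'})

queue.join()  # returns once every item has been marked done via task_done()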
Example No. 2
def get_log():
    call("svn log --verbose --xml https://subversion.ews.illinois.edu/svn/fa16-cs242/ywang443 > app/svn_log.xml",
         shell=True)
    project_names = []
    filename = 'app/svn_log.xml'
    info = parse_log(filename)
    return jsonify(info.items())
Example No. 3
def update(num):
    logfile_path = args.logfile_path
    if '@' in args.logfile_path:
        localfile_path = os.path.join(dirname, str(os.getpid())+"_temp.log")
        getLogFilefromRemote(localfile_path)
        logfile_path = localfile_path
    train_dic_list, test_dic_list = log_parser.parse_log(logfile_path)
    if len(train_dic_list)<=0:
        return line_loss, line_eval
    #train_dic_list = train_dic_list[0:counter]
    if args.type == 0:
        x_axis_field = "NumIters"
        y_axis_field = "mbox_loss"
        y_eval_field = "detection_eval"
    train_data = [[i[x_axis_field] for i in train_dic_list],
                    [i[y_axis_field] for i in train_dic_list]]
    test_data = [[i[x_axis_field] for i in test_dic_list], [i[y_eval_field] for i in test_dic_list]]
    xmin, xmax = ax_loss.get_xlim()
    if train_data[0][-1] >= xmax:
        ax_loss.set_xlim(xmin,int(train_data[0][-1]*1.3))
        ax_loss.figure.canvas.draw()
    ymin, ymax = ax_loss.get_ylim()
    if train_data[1][0] >= ymax:
        ax_loss.set_ylim(ymin,train_data[1][0]+10)
        ax_loss.figure.canvas.draw()
    
    line_loss.set_data(train_data[0], train_data[1])
    line_eval.set_data(test_data[0], test_data[1])
    return line_loss, line_eval
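
The update(num) signature and the returned line artists match matplotlib's animation callback protocol; a minimal sketch of how the function above might be driven, assuming the module-level ax_loss, line_loss, and line_eval (and the other globals it references) are set up roughly like this:

import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax_loss = plt.subplots()
line_loss, = ax_loss.plot([], [], label='mbox_loss')
line_eval, = ax_loss.plot([], [], label='detection_eval')
ax_loss.legend()

# Re-parse the log and redraw every 10 seconds; blitting stays off because
# update() rescales the axis limits when new points fall outside them.
anim = animation.FuncAnimation(fig, update, interval=10000, blit=False)
plt.show()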
Example No. 4
def plotTrainingLossAndAccuracy(trainingLogPath, evaluationTargetPath):

   trainingLog, testLog = pl.parse_log(trainingLogPath)
   # logger.debug(testLog[1])

   trainingData = []
   for item in trainingLog:
      trainingData.append([item['NumIters'], item['loss']])
   testData = []
   for item in testLog:
      testData.append([item['NumIters'], item['loss'], item['accuracy']])

   trainingData = np.array(trainingData)
   testData = np.array(testData)


   trainingLog = np.array(trainingLog)
   testLog = np.array(testLog)

   # logger.debug(trainingLog.shape)
   # logger.debug(testLog.shape)


   iterationMaximum = ITERATION_MAXIMUM
   counter = 0
   while counter < ITERATION_VARIATIONS:

      fig, ax1 = plt.subplots()
      trainingLossPlot, = ax1.plot(trainingData[:,0], trainingData[:,1], color='r', label='Training set loss')
      testLossPlot, = ax1.plot(testData[:,0], testData[:,1], label='Test set loss', color='b')

      ax1.set_xlabel('Iterations')
      ax1.set_ylabel('Loss')

      ax2 = ax1.twinx()
      accuracyPlot, = ax2.plot(testData[:,0], testData[:,2], label='Test set accuracy', color='g')
      ax2.set_ylabel('Accuracy')

      ax1.axis([0, iterationMaximum, 0, LOSS_MAXIMUM])
      ax2.axis([0, iterationMaximum, 0, 1])
      ax1.set_xticks(np.arange(0,iterationMaximum + 1, iterationMaximum * 0.1))
      ax1.set_xticklabels(np.arange(0,iterationMaximum + 1, iterationMaximum  * 0.1), rotation=45)
      ax1.set_yticks(np.arange(0, LOSS_MAXIMUM, float(LOSS_MAXIMUM) / 10))
      ax2.set_yticks(np.arange(0, 1, float(1) / 10))

      ax1.grid(True)
      ax2.grid(True)

      # plt.title(evaluationTargetPath)

      plt.legend([trainingLossPlot, testLossPlot, accuracyPlot], [trainingLossPlot.get_label(), testLossPlot.get_label(), accuracyPlot.get_label()], bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)

      plt.savefig(evaluationTargetPath +'lossAndAccuracy_' + str(iterationMaximum) + '.pdf', bbox_inches='tight')

      plt.close()

      iterationMaximum = int(iterationMaximum * 0.5)

      counter += 1
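
ITERATION_MAXIMUM, LOSS_MAXIMUM, and ITERATION_VARIATIONS are module-level constants that the excerpt does not show; hypothetical values, purely for illustration:

# Hypothetical settings; the real values live elsewhere in the source module.
ITERATION_MAXIMUM = 100000    # x-axis limit of the widest plot
LOSS_MAXIMUM = 10             # y-axis limit for the loss curves
ITERATION_VARIATIONS = 3      # how many progressively zoomed-in plots to save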
Example No. 5
def plot_chart(chart_type, path_to_png, path_to_log_list):
    for path_to_log in path_to_log_list:
        #print "path_to_log %s" % path_to_log
        #comment this function
        #os.system('%s %s' % (get_log_parsing_script(), path_to_log))

        #extract train and test data respectively.==========================================
        train_dict_list, test_dict_list = parse_log.parse_log(path_to_log)
        (filepath, log_basename) = os.path.split(path_to_log)
        #print('filepath' + filepath)
        parse_log.save_csv_files(path_to_log, filepath + '/', train_dict_list,
                                 test_dict_list)
        #====================================================================

        data_file = filepath + '/' + get_data_file(chart_type, path_to_log)
        x_axis_field, y_axis_field = get_field_descriptions(chart_type)
        x, y = get_field_indices(x_axis_field, y_axis_field)
        data = load_data(data_file, x, y)
        ## TODO: more systematic color cycle for lines
        color = [random.random(), random.random(), random.random()]
        label = get_data_label(path_to_log)
        linewidth = 0.75
        ## If there are too many datapoints, do not use a marker.
        ##        use_marker = False
        use_marker = True
        if not use_marker:
            plt.plot(data[0],
                     data[1],
                     label=label,
                     color=color,
                     linewidth=linewidth)
        else:
            ok = False
            ## Some markers throw ValueError: Unrecognized marker style
            while not ok:
                try:
                    marker = random_marker()
                    plt.plot(data[0],
                             data[1],
                             label=label,
                             color=color,
                             marker=marker,
                             linewidth=linewidth)
                    ok = True
                except ValueError:
                    # some markers are rejected by plt.plot(); retry with another
                    pass
    legend_loc = get_legend_loc(chart_type)
    plt.legend(loc=legend_loc, ncol=1)  # adjust ncol to fit the space
    plt.title(get_chart_type_description(chart_type))
    plt.xlabel(x_axis_field)
    plt.ylabel(y_axis_field)
    plt.savefig(path_to_png)
Example No. 6
def get_min_val_loss(log_file):

    [train_dict, val_dict] = parse_log(log_file)

    # print([train_dict, val_dict])
    min_loss = 100000
    iterations = 0
    for t in range(len(val_dict)):
        loss_value = val_dict[t]['loss']
        if min_loss > loss_value:
            min_loss = loss_value
            iterations = int(val_dict[t]['NumIters'])

    return [min_loss, iterations]
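
A quick usage sketch for the helper above (the log path is a placeholder):

min_loss, best_iter = get_min_val_loss('training.log')
print('lowest validation loss %.4f at iteration %d' % (min_loss, best_iter))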
Example No. 7
def plot_log(log_path, save_fig, show_fig):
    train_dict, val_dict, number_of_correspondences = parse_log(log_path)

    train_keys = ['seg_cs_loss', 'seg_extra_loss']
    train_labels = ['Cityscapes', 'Extra']
    if 'Vistas' in val_dict:
        train_keys.append('seg_vis_loss')
        train_labels.append('Vistas')

    plt.figure(1)
    plt.subplot(221)
    plt.plot(train_dict['iter'], train_dict['corr_loss'])
    plt.xlabel('iteration')
    plt.title('corr loss')

    plt.subplot(222)
    for train_key, train_label in zip(train_keys, train_labels):
        plt.plot(train_dict['iter'], train_dict[train_key], label=train_label)
    plt.legend()
    plt.xlabel('iteration')
    plt.title('seg loss')

    plt.subplot(223)
    for k, v in val_dict.items():
        plt.plot(v['iter'], v['acc'], label=k)
    plt.legend()
    plt.xlabel('iteration')
    plt.ylabel('acc')
    plt.title('validation')

    plt.subplot(224)
    for k, v in val_dict.items():
        plt.plot(v['iter'], v['mean_iu'], label=k)
    plt.legend()
    plt.xlabel('iteration')
    plt.ylabel('mIoU')
    plt.title('validation')

    plt.tight_layout()
    if show_fig:
        plt.show()
    if save_fig:
        plot_path = os.path.join(
            os.path.dirname(
                os.path.realpath(log_path)),
            'log.png')
        print('plot saved as %s' % plot_path)
        plt.savefig(plot_path)
Example No. 8
    def parse(self):
        
        log_data = pl.parse_log(self.path_log)
        # allow for backwards compatibility
        if len(log_data) == 4:
            self.train_dict_list, self.train_keys, self.test_dict_list, self.test_keys = log_data
        else:
            self.train_dict_list, self.test_dict_list = log_data
            if len(self.train_dict_list) > 0:
                self.train_keys = self.train_dict_list[0].keys()
            else:
                self.train_keys = []
                
            if len(self.test_dict_list) > 0:
                self.test_keys = self.test_dict_list[0].keys()
            else:
                self.test_keys = []

        return self.train_keys, self.test_keys
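
Several of these examples index the parsed records by names such as 'NumIters', 'loss', and 'accuracy'. As a rough illustration only (the exact keys and values depend on the parser in use), the data handled above can be pictured as:

# Illustrative shape of the parsed data; the field names are taken from the
# examples on this page and the values are made up.
train_dict_list = [
    {'NumIters': 0,   'LearningRate': 0.01, 'loss': 2.31},
    {'NumIters': 100, 'LearningRate': 0.01, 'loss': 1.88},
]
test_dict_list = [
    {'NumIters': 0,    'loss': 2.30, 'accuracy': 0.10},
    {'NumIters': 1000, 'loss': 0.97, 'accuracy': 0.67},
]
train_keys = list(train_dict_list[0].keys())  # e.g. ['NumIters', 'LearningRate', 'loss']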
Example No. 9
    def parse(self):

        log_data = pl.parse_log(self.path_log)
        # allow for backwards compatibility
        if len(log_data) == 4:
            self.train_dict_list, self.train_keys, self.test_dict_list, self.test_keys = log_data
        else:
            self.train_dict_list, self.test_dict_list = log_data
            if len(self.train_dict_list) > 0:
                self.train_keys = self.train_dict_list[0].keys()
            else:
                self.train_keys = []

            if len(self.test_dict_list) > 0:
                self.test_keys = self.test_dict_list[0].keys()
            else:
                self.test_keys = []

        return self.train_keys, self.test_keys
Example No. 10
def main():
    parser = argparse.ArgumentParser(
        description='Search log file for errors and post the data to the boss')
    parser.add_argument('--logfile',
                        type=str,
                        default=None,
                        help='log file to parse')
    parser.add_argument('--repeatfile',
                        type=str,
                        default='repeat_cutouts.txt',
                        help='file listing cutouts to repeat')
    args = parser.parse_args()

    if args.logfile is not None:
        args.repeatfile = parse_log(args.logfile, args.repeatfile)

    cutouts = get_cutouts(args.repeatfile)

    iterate_posting_cutouts(cutouts)

    print('Finished all failed cutouts, check logs for errors')
Example No. 11
def plot_chart(log_file, path_to_png, mode=PLOT_MODE.NORMAL):

    mean_ap = 0
    phases, detected_mean_ap = parse_log(log_file)
    if detected_mean_ap != None:
        mean_ap = detected_mean_ap

    print "Processing %s with mAP=%f" % (path_to_png, mean_ap)

    plt.figure(1, figsize=(8, 32))

    end_phase = min(len(phases), 4)
    for phase_idx in range(0, end_phase):
        phase = np.array(phases[phase_idx])
        plt.subplot(411 + phase_idx)
        label = LABELS[phase_idx]
        plt.title("%s%s" % ("mAP = %f    " % mean_ap if phase_idx == 0 else "",
                            str(label[phase_idx])))

        for x_label, y_label in FIELDS[phase_idx]:
            ## TODO: more systematic color cycle for lines
            color = [random.random(), random.random(), random.random()]
            linewidth = 0.75
            ## If there are too many datapoints, do not use a marker.
            ##        use_marker = False
            use_marker = True

            # if (mode==PLOT_MODE.MOVING_AVG):

            x_data = [row[x_label] for row in phase]
            y_data = [row[y_label] for row in phase]

            if mode == PLOT_MODE.MOVING_AVG:
                y_data = moving_average(y_data, 100)
            elif mode == PLOT_MODE.BOTH:
                marker = random_marker()
                plt.plot(x_data,
                         y_data,
                         label=label,
                         color=color,
                         marker=marker,
                         linewidth=linewidth)

                color = [random.random(), random.random(), random.random()]
                y_data = moving_average(y_data, 100)

            if not use_marker:
                plt.plot(x_data,
                         y_data,
                         label=label,
                         color=color,
                         linewidth=linewidth)
            else:
                marker = random_marker()
                plt.plot(x_data,
                         y_data,
                         label=label,
                         color=color,
                         marker=marker,
                         linewidth=linewidth)

    #legend_loc = get_legend_loc(chart_type)
    #plt.legend(loc = legend_loc, ncol = 1) # ajust ncol to fit the space
    #plt.xlabel(x_axis_field)
    #plt.ylabel(y_axis_field)

    # plt.annotate(fontsize='xx-small')
    print "Saving...",
    plt.savefig(path_to_png, dpi=600)
    print "done"
    plt.show()
Example No. 12
def main():
    logfile = sys.argv[1]
    print(logfile)
    logdir = os.path.dirname(logfile)
    print(logdir)
    ''' parse log '''
    train_dict_list, test_dict_list = parse_log.parse_log(logfile)
    ''' save to file '''
    parse_log.save_csv_files(logfile, logdir, train_dict_list, test_dict_list)
    ''' read csv '''
    logtest = logfile + '.test'
    logtrain = logfile + '.train'
    test_data = ReadCSV(logtest)
    train_data = ReadCSV(logtrain)
    print(test_data[0])
    print(train_data[0])
    ''' plot '''
    fig, ax1 = plt.subplots(1, 1, figsize=(15, 10))
    fig.subplots_adjust(right=0.8)

    # ax1
    ax1.plot(test_data[:, 0],
             test_data[:, -1],
             color='blue',
             label="test_loss")
    ax1.plot(train_data[:, 0],
             train_data[:, -1],
             color='green',
             label="train_loss")
    ax1.set_ylabel('loss')
    ax1.set_xlabel('iteration')

    # ax2
    ax2 = ax1.twinx()
    lines = []
    if (len(test_data[0]) > 5):
        acc1, = ax2.plot(test_data[:, 0],
                         test_data[:, 3],
                         color='red',
                         label="accuracy#1")
        acc5, = ax2.plot(test_data[:, 0],
                         test_data[:, 4],
                         color='yellow',
                         label="accuracy#5")
        lines.append(acc1)
        lines.append(acc5)
    else:
        acc1, = ax2.plot(test_data[:, 0],
                         test_data[:, 3],
                         color='red',
                         label="accuracy#1")
        lines.append(acc1)
    ax2.set_ylabel('accuracy')

    # ax3
    ax3 = ax1.twinx()
    ax3.spines['right'].set_position(('axes', 1.1))
    lr, = ax3.plot(train_data[:, 0],
                   train_data[:, 2],
                   color='black',
                   label="LearningRate")
    lines.append(lr)
    ax3.set_ylabel('LearningRate')

    # legend
    ax1.legend()
    ax2.legend(lines, [l.get_label() for l in lines], loc="upper center")

    plt.show()
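
ReadCSV is not shown in the excerpt; a plausible stand-in using NumPy, assuming the .train/.test files written by save_csv_files are comma-separated with a single header row:

import numpy as np

def ReadCSV(path):
    # Hypothetical helper: load a comma-separated log file written by
    # parse_log.save_csv_files, skipping its header row.
    return np.genfromtxt(path, delimiter=',', skip_header=1)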
Example No. 13
if os.path.isdir(sys.argv[1]):  # assumed guard: the excerpt starts mid-block
    log_files = [
        file for file in os.listdir(sys.argv[1]) if file.lower().endswith(ext)
    ]
elif os.path.isfile(sys.argv[1]):
    log_files.append(sys.argv[1])
else:
    print "Invalid file or directory supplied: %s!" % sys.argv[1]
    raise

# Read and average the statistics.
avg_evaluation = None
avg_max_fitness = None
avg_average_fitness = None
avg_species_count = None
avg_node_count = None
avg_link_count = None
for log_file in log_files:
    log = parse_log("%s%s" % (directory, log_file), evaluate_xor2)
    pylab.plot(log.evaluation)
    pylab.show()
    length = len(log.evaluation)
    if avg_evaluation == None:
        avg_evaluation = zeros(length)
        avg_max_fitness = zeros(length)
        avg_average_fitness = zeros(length)
        avg_species_count = zeros(length)
        avg_node_count = zeros(length)
        avg_link_count = zeros(length)
    for i in xrange(length):
        avg_evaluation[i] += log.evaluation[i]
        avg_max_fitness[i] += log.max_fitness[i]
        avg_average_fitness[i] += log.average_fitness[i]
        avg_species_count[i] += log.species_count[i]
Example No. 14
from parse_log import parse_log
from parse_list import parse_list

if __name__ == '__main__':
    log_file = "svn_log.xml"
    list_file = "svn_list.xml"
    print parse_log(log_file)
    print parse_list(list_file)
Example No. 15
def main(log_dir, parser_output_dir, rule_dir):
    #print "break point"
    csv_file = parse_log.parse_log(log_dir, parser_output_dir)
    run_predict(csv_file)
    parse_log.extract_average_log(parser_output_dir, rule_dir)
    print "done"
Example No. 16
if os.path.isdir(sys.argv[1]):  # assumed guard: the excerpt starts mid-block
    log_files = [file for file in os.listdir(sys.argv[1]) if file.lower().endswith(ext)]
elif os.path.isfile(sys.argv[1]):
    log_files.append(sys.argv[1])
else:
    print "Invalid file or directory supplied: %s!" % sys.argv[1]
    raise

# Read and average the statistics.
avg_evaluation = None
avg_max_fitness = None
avg_average_fitness = None
avg_species_count = None
avg_node_count = None
avg_link_count = None
for log_file in log_files:
    log = parse_log( "%s%s" % (directory, log_file), evaluate_xor2 )
    pylab.plot( log.evaluation )
    pylab.show()
    length = len(log.evaluation)
    if avg_evaluation == None:
        avg_evaluation = zeros(length)
        avg_max_fitness = zeros(length)
        avg_average_fitness = zeros(length)
        avg_species_count = zeros(length)
        avg_node_count = zeros(length)
        avg_link_count = zeros(length)
    for i in xrange(length):
        avg_evaluation[i] += log.evaluation[i]
        avg_max_fitness[i] += log.max_fitness[i]
        avg_average_fitness[i] += log.average_fitness[i]
        avg_species_count[i] += log.species_count[i]
Example No. 17
def main():
    args = parse_args()
    train_dict_list, train_dict_names, test_dict_list, test_dict_names, debug_info_dict_list, debug_info_names = parse_log(
        os.path.realpath(args.logfile_path))
    interactive_display_test_train(test_dict_list, train_dict_list,
                                   args.acc_layer)

    if debug_info_dict_list:  # only show the rest of the graphs if debug info exists
        with open(os.path.realpath(args.logfile_path)) as f:
            max_param_count = getMaxParamCount(f)
        # Get the layer names used in this net
        layer_name_list = get_layer_names(debug_info_dict_list)

        # Show activation, parameter data and backpropagated gradients per
        # layer and per parameter.
        # Activations
        layer_list = [(layer, 'Activation') for layer in layer_name_list]
        display_results.interactive_plot_layers(layer_list,
                                                debug_info_dict_list,
                                                'Layer Mean Abs Activations')
        # Back-propagated gradients per layer
        layer_list = [(layer, 'BackPropBottomDiff')
                      for layer in layer_name_list]
        display_results.interactive_plot_layers(
            layer_list, debug_info_dict_list,
            'Back-propagated Gradients per Layer')
        # Layer parameter data values
        layer_list = [(layer, 'param' + str(i) + '_Data')
                      for layer in layer_name_list
                      for i in range(max_param_count + 1)]
        display_results.interactive_plot_layers(layer_list,
                                                debug_info_dict_list,
                                                'Layer Mean Abs Data Values')
        # Gradients per layer
        layer_list = [(layer, 'BackPropDiff_param' + str(i))
                      for layer in layer_name_list
                      for i in range(max_param_count + 1)]
        display_results.interactive_plot_layers(layer_list,
                                                debug_info_dict_list,
                                                'Gradient per Parameter')
    plt.show()
Example No. 18
def plotTrainingLossAndAccuracy(trainingLogPath, evaluationTargetPath):

    trainingLog, testLog = pl.parse_log(trainingLogPath)
    # logger.debug(testLog[1])

    trainingData = []
    for item in trainingLog:
        trainingData.append([item['NumIters'], item['loss']])
    testData = []
    for item in testLog:
        testData.append([item['NumIters'], item['loss'], item['accuracy']])

    trainingData = np.array(trainingData)
    testData = np.array(testData)

    trainingLog = np.array(trainingLog)
    testLog = np.array(testLog)

    # logger.debug(trainingLog.shape)
    # logger.debug(testLog.shape)

    iterationMaximum = ITERATION_MAXIMUM
    counter = 0
    while counter < ITERATION_VARIATIONS:

        fig, ax1 = plt.subplots()
        trainingLossPlot, = ax1.plot(trainingData[:, 0],
                                     trainingData[:, 1],
                                     color='r',
                                     label='Training set loss')
        testLossPlot, = ax1.plot(testData[:, 0],
                                 testData[:, 1],
                                 label='Test set loss',
                                 color='b')

        ax1.set_xlabel('Iterations')
        ax1.set_ylabel('Loss')

        ax2 = ax1.twinx()
        accuracyPlot, = ax2.plot(testData[:, 0],
                                 testData[:, 2],
                                 label='Test set accuracy',
                                 color='g')
        ax2.set_ylabel('Accuracy')

        ax1.axis([0, iterationMaximum, 0, LOSS_MAXIMUM])
        ax2.axis([0, iterationMaximum, 0, 1])
        ax1.set_xticks(
            np.arange(0, iterationMaximum + 1, iterationMaximum * 0.1))
        ax1.set_xticklabels(np.arange(0, iterationMaximum + 1,
                                      iterationMaximum * 0.1),
                            rotation=45)
        ax1.set_yticks(np.arange(0, LOSS_MAXIMUM, float(LOSS_MAXIMUM) / 10))
        ax2.set_yticks(np.arange(0, 1, float(1) / 10))

        ax1.grid(True)
        ax2.grid(True)

        # plt.title(evaluationTargetPath)

        plt.legend([trainingLossPlot, testLossPlot, accuracyPlot], [
            trainingLossPlot.get_label(),
            testLossPlot.get_label(),
            accuracyPlot.get_label()
        ],
                   bbox_to_anchor=(1.1, 1),
                   loc=2,
                   borderaxespad=0.)

        plt.savefig(evaluationTargetPath + 'lossAndAccuracy_' +
                    str(iterationMaximum) + '.pdf',
                    bbox_inches='tight')

        plt.close()

        iterationMaximum = int(iterationMaximum * 0.5)

        counter += 1