Code Example #1
File: image_compression.py  Project: akotek/iml_ex1
def main():
    # ---- Init data ----------
    img = misc.ascent()
    U, S, VT = svd(img, full_matrices=False)
    # -------------------------

    # -------- 1st Plot --------
    k_x_axis = []
    y1_forb_dist = []
    y2_comp_ratio = []
    for k in range(len(img)):
        k_x_axis.append(k)
        rec_img, y1, y2 = get_rec_forb_comp(img, U, copy.deepcopy(S), VT, k)
        y1_forb_dist.append(y1)
        y2_comp_ratio.append(y2)

    utils.plot_graph(k_x_axis, "k", y1_forb_dist, "Forb Dist")
    utils.plot_graph(k_x_axis, "k", y2_comp_ratio, "Compression Ratio")
    # -------------------------

    # -------- 2nd Plot --------
    k_image_list = [5, 20, 45, 250, 511]
    image_list = []
    y1_forb_dist = []
    y2_comp_ratio = []
    for k in k_image_list:
        rec_img, y1, y2 = get_rec_forb_comp(img, U, copy.deepcopy(S), VT, k)
        image_list.append(rec_img)
        y1_forb_dist.append(y1)
        y2_comp_ratio.append(y2)

    utils.plot_images_from_list(image_list, k_image_list, y1_forb_dist,
                                y2_comp_ratio)
Code Example #2
File: price_prediction.py  Project: akotek/iml_ex1
def main():
    df = load_data("kc_house_data.csv")
    df = pre_process_data(df)

    train_error = []
    test_error = []
    for x in range(1, 100):
        rows = np.random.rand(len(df)) < x / 100
        train = df[rows]
        test = df[~rows]
        y_train = get_col(train, 'price')
        train = remove_cols(train, ['price'])
        w_train = np.dot(get_psuedo_inverse(train), y_train)
        y_hat_train = train.dot(w_train)

        y_test = get_col(test, 'price')
        test = remove_cols(test, ['price'])
        y_hat_test = test.dot(w_train)

        train_error.append(get_rmse(y_train, y_hat_train))
        test_error.append(get_rmse(y_test, y_hat_test))

    utils.plot_graph(list(range(1, 100)),"X", train_error, "mse_train", \
                                      test_error,
                     "mse_test")
Code Example #3
def kruskal_test(filename):

    graph = utils.load_undirected_and_unweighted_graph_from_txt_file(filename)
    tree = kruskal(graph)

    tree_graph = nx.Graph()
    tree_graph.add_nodes_from(graph.nodes)
    for u, v, weight in tree:
        tree_graph.add_edge(u, v, weight=weight)

    utils.plot_graph(tree_graph)
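
The kruskal function itself is not shown. A self-contained sketch of Kruskal's algorithm over a networkx graph, returning (u, v, weight) tuples as the loop above expects, with weights defaulting to 1 for an unweighted graph (a generic textbook version, not the project's implementation):

import networkx as nx

def kruskal(graph: nx.Graph):
    """Sketch of Kruskal's MST via union-find with path compression."""
    parent = {node: node for node in graph.nodes}

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x

    tree = []
    for u, v, data in sorted(graph.edges(data=True),
                             key=lambda e: e[2].get('weight', 1)):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components, so it cannot form a cycle
            parent[root_u] = root_v
            tree.append((u, v, data.get('weight', 1)))
    return tree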
Code Example #4
def schedule(processes, scheduler):
    processes_count = len(processes)
    scheduled_processes = []
    context_switches = []
    process_switches = []  # The times where the process changes, used to tick X-axis on the bar chart
    time = 0
    next_process = None if len(processes) == 0 else processes[-1]
    while True:
        # Add arriving processes to the scheduler
        while next_process is not None and next_process['arrival_time'] == time:
            process = processes.pop()
            scheduler.add_process(process)
            next_process = None if len(processes) == 0 else processes[-1]

        # Run the scheduled process
        is_running, current_process = scheduler.run_scheduled_process(time)
        # Check for termination case (no more arriving processes & scheduler idle)
        if not is_running and next_process is None:
            process_switches.append(time)
            break
        # Check if context switch or running process
        if current_process is None:
            context_switches.append(time)
            scheduled_processes.append(0)
        else:
            scheduled_processes.append(current_process)
        # Record the times at which the graph changes
        if time > 0 and current_process != scheduled_processes[time - 1]:
            process_switches.append(time)
        time += 1

    statistics = scheduler.return_stats()
    write_stats(statistics)
    plot_graph(times=range(time),
               scheduled_processes=scheduled_processes,
               context_switches=context_switches,
               xticks=process_switches,
               processes_count=processes_count)
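
The scheduler object is only required to expose add_process, run_scheduled_process(time) and return_stats(). A minimal first-come-first-served scheduler that would satisfy this driver loop (purely illustrative; field names other than 'arrival_time' are assumptions, and context-switch overhead is not modelled):

from collections import deque

class FCFSScheduler:
    """Illustrative FCFS scheduler compatible with schedule() above."""

    def __init__(self):
        self.queue = deque()
        self.current = None
        self.remaining = 0
        self.turnaround_times = []

    def add_process(self, process):
        self.queue.append(process)

    def run_scheduled_process(self, time):
        # Pick the next queued process if nothing is running.
        if self.current is None and self.queue:
            self.current = self.queue.popleft()
            self.remaining = self.current['burst_time']  # assumed field
        if self.current is None:
            return False, None  # idle: nothing scheduled this tick
        pid = self.current['pid']  # assumed field
        self.remaining -= 1
        if self.remaining == 0:
            self.turnaround_times.append(time + 1 - self.current['arrival_time'])
            self.current = None
        return True, pid

    def return_stats(self):
        n = len(self.turnaround_times)
        return {'avg_turnaround': sum(self.turnaround_times) / n if n else 0}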
Code Example #5
            transcoded_bitrate = provider.get_bitrate(args.decimal_places, transcode_output_path)
            size_rounded = force_decimal_places(transcode_size, args.decimal_places)
            data_for_current_row = [f'{size_rounded} MB', transcoded_bitrate]

            # Save the output of libvmaf to the following path.
            json_file_path = f'{output_folder}/Metrics of each frame.json'
            # Run the libvmaf filter.
            run_libvmaf(transcode_output_path, args, json_file_path, fps, original_video_path, factory, duration, crf)

            vmaf_scores.append(get_metrics_save_table(comparison_table, json_file_path, args, args.decimal_places, 
                               data_for_current_row, table, output_folder, time_taken, crf))

            write_table_info(comparison_table, filename, original_bitrate, args, f'Preset {preset}')

        # Plot a bar graph showing the average VMAF score of each CRF value.
        plot_graph('CRF vs VMAF', 'CRF', 'VMAF', crf_values, vmaf_scores, f'{prev_output_folder}/CRF vs VMAF', 
                   bar_graph=True)
            
    # Presets comparison mode.
    elif is_list(args.preset):
        log.info('Presets comparison mode activated.')
        chosen_presets = args.preset
        presets_string = ', '.join(chosen_presets)
        crf = args.crf[0] if is_list(args.crf) else crf
        log.info(f'Presets {presets_string} will be compared at a CRF of {crf}.')
        line()

        prev_output_folder, comparison_table, output_ext = create_output_folder_initialise_table('Preset')

        # The -t/--encode-length argument was specified.
        if args.encode_length:
            original_video_path = cut_video(filename, args, output_ext, prev_output_folder, comparison_table)
Code Example #6
File: train.py  Project: Ugenteraan/VGG-16-PyTorch1.5
def main():
    '''
    Train function.
    '''

    ########################################################## Model Initialization & Loading ##########################################################
    vgg = Model(resized_img_size=t_cfg.RESIZED_IMAGE_SIZE,
                num_classes=t_cfg.NUM_CLASSES,
                init_weights=True)

    optimizer = Adam(vgg.parameters(), lr=t_cfg.LEARNING_RATE)  #optimizer
    lr_decay = lr_scheduler.ExponentialLR(
        optimizer, gamma=t_cfg.LR_DECAY_RATE
    )  #scheduler is used to lower the learning rate during training later.
    loss_criterion = torch.nn.CrossEntropyLoss()  #loss function.

    vgg = vgg.to(t_cfg.DEVICE)  #move the network to GPU if available.

    print("--- Model Architecture ---")
    print(vgg)

    if t_cfg.TRAINED_MODEL_PRESENCE:

        model_params = torch.load(
            t_cfg.MODEL_PATH +
            t_cfg.MODEL_NAME)  #reads parameters from the model file.
        vgg.load_state_dict(
            model_params)  #load the parameters into the model architecture.
        print("Model parameters are loaded from the saved file!")

    ########################################################## Data Initialization & Loading ##########################################################
    #Initialize the training data class.
    training_data = LoadDataset(
        resized_image_size=t_cfg.RESIZED_IMAGE_SIZE,
        total_images=t_cfg.TOTAL_TRAIN_DATA,
        classes=t_cfg.CLASSES,
        data_list=t_cfg.TRAIN_IMG_LABEL_LIST,
        transform=transforms.Compose([
            RandomRotate(angle_range=t_cfg.ROTATION_RANGE,
                         prob=t_cfg.ROTATION_PROB),
            RandomShear(shear_range=t_cfg.SHEAR_RANGE, prob=t_cfg.SHEAR_PROB),
            RandomHorizontalFlip(prob=t_cfg.HFLIP_PROB),
            RandomVerticalFlip(prob=t_cfg.VFLIP_PROB),
            RandomNoise(mode=t_cfg.NOISE_MODE, prob=t_cfg.NOISE_PROB),
            ToTensor(mode=0)
        ]))

    testing_data = LoadDataset(resized_image_size=t_cfg.RESIZED_IMAGE_SIZE,
                               total_images=t_cfg.TOTAL_TEST_DATA,
                               classes=t_cfg.CLASSES,
                               data_list=t_cfg.TEST_IMG_LABEL_LIST,
                               transform=transforms.Compose([ToTensor(mode=0)
                                                             ]))

    train_dataloader = DataLoader(training_data,
                                  batch_size=t_cfg.BATCH_SIZE,
                                  shuffle=t_cfg.DATA_SHUFFLE,
                                  num_workers=t_cfg.NUM_WORKERS)
    test_dataloader = DataLoader(testing_data,
                                 batch_size=t_cfg.BATCH_SIZE,
                                 shuffle=t_cfg.DATA_SHUFFLE,
                                 num_workers=t_cfg.NUM_WORKERS)

    ########################################################## Model Training & Saving ##########################################################
    best_accuracy = 0

    entire_loss_list = []
    entire_accuracy_list = []

    for epoch_idx in range(t_cfg.EPOCH):

        print("Training for epoch %d has started!" % (epoch_idx + 1))

        vgg.train()
        epoch_training_loss = []
        epoch_accuracy = []
        i = 0
        for i, sample in tqdm(enumerate(train_dataloader)):

            batch_x, batch_y = sample['image'].to(
                t_cfg.DEVICE), sample['label'].to(t_cfg.DEVICE)

            optimizer.zero_grad(
            )  #clear the gradients in the optimizer between every batch.

            net_output = vgg(batch_x)  #output from the network.

            total_loss = loss_criterion(input=net_output, target=batch_y)

            epoch_training_loss.append(
                total_loss.item())  #append the loss of every batch.

            total_loss.backward()  #calculate the gradients.
            optimizer.step()

            batch_acc = calculate_accuracy(network_output=net_output,
                                           target=batch_y)
            epoch_accuracy.append(batch_acc.cpu().numpy())

        lr_decay.step()  #decay rate update
        curr_accuracy = sum(epoch_accuracy) / (i + 1)
        curr_loss = sum(epoch_training_loss)

        print("The accuracy at epoch %d is %g" % (epoch_idx, curr_accuracy))
        print("The loss at epoch %d is %g" % (epoch_idx, curr_loss))

        entire_accuracy_list.append(curr_accuracy)
        entire_loss_list.append(curr_loss)

        vgg.eval()
        epoch_testing_accuracy = []
        epoch_testing_loss = []
        j = 0
        with torch.no_grad():

            for j, test_sample in tqdm(enumerate(test_dataloader)):

                batch_x, batch_y = test_sample['image'].to(
                    t_cfg.DEVICE), test_sample['label'].to(t_cfg.DEVICE)

                net_output = vgg(batch_x)

                total_loss = loss_criterion(input=net_output, target=batch_y)

                epoch_testing_loss.append(total_loss.item())

                batch_acc = calculate_accuracy(network_output=net_output,
                                               target=batch_y)
                epoch_testing_accuracy.append(batch_acc.cpu().numpy())

            test_accuracy = sum(epoch_testing_accuracy) / (j + 1)
            test_loss = sum(epoch_testing_loss)

            print("The testing accuracy at epoch %d is %g" %
                  (epoch_idx, test_accuracy))
            print("The testing loss at epoch %d is %g" %
                  (epoch_idx, test_loss))

        if test_accuracy > best_accuracy:

            torch.save(vgg.state_dict(), t_cfg.MODEL_PATH + t_cfg.MODEL_NAME)  #save the state dict so the load_state_dict() call above can read it back.
            best_accuracy = test_accuracy
            print("Model is saved !")

    ########################################################## Graphs ##########################################################
    if t_cfg.PLOT_GRAPH:
        plot_graph(t_cfg.EPOCH, "Epoch", "Training Loss",
                   "Training Loss for %d epoch" % (t_cfg.EPOCH), "./loss.png",
                   [entire_loss_list, 'r--', "Loss"])
        plot_graph(t_cfg.EPOCH, "Epoch", "Training Accuracy",
                   "Training Accuracy for %d epoch" % (t_cfg.EPOCH),
                   "./accuracy.png", [entire_accuracy_list, 'b--', "Accuracy"])
Code Example #7
        classes = utils.get_classes(dataset)
        features = range(len(X_train[0]))

        root = DT(dataset, classes, features, 0, depth)
        Y_pred = utils.classify(root, X_test)

        for k in xrange(1, K+1):
            if k <= len(Y_test):
                top_K_indices = utils.get_recommendations(Y_pred, k)
                precision, recall = utils.compute_metrics(Y_pred, Y_test, top_K_indices)
                precision_dict[k].append(precision)
                recall_dict[k].append(recall)

        MAE = utils.calc_MAE(Y_pred, Y_test)
        RMSE = utils.calc_RMSE(Y_pred, Y_test)
        accu = utils.accuracy(Y_pred, Y_test)
        
        MAE_arr.append(MAE)
        RMSE_arr.append(RMSE)
        accuracy_arr.append(accu)
        print "User: {} - Test accuracy: {}".format(index+1, accu)

    print "\nTime taken: {} sec".format(timer() - start)
    print 'Average MAE: {}'.format(sum(MAE_arr)/float(len(MAE_arr)))
    print 'Average RMSE: {}'.format(sum(RMSE_arr)/float(len(RMSE_arr)))
    print 'Maximum test accuracy: {}'.format(max(accuracy_arr))
    print 'Minimum test accuracy: {}'.format(min(accuracy_arr))
    print 'Average Accuracy: {}'.format(sum(accuracy_arr)/float(len(accuracy_arr)))
    utils.write_to_file(accuracy_arr)
    utils.plot_graph(precision_dict, recall_dict)
Code Example #8
y_test = y_test.astype(np.int32).reshape(10000, 1)


normal_model = create_keras_model()
normal_model.compile(optimizer=optimizers.Adam(learning_rate=lr),
                     loss=losses.SparseCategoricalCrossentropy(),
                     metrics=[metrics.SparseCategoricalAccuracy()])

# training
history = normal_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
normal_model.save(model_dir / model_name)


# plotting accuracy and loss graph
fig = plt.figure(figsize=(10, 6))
plot_graph(range(1, len(history.epoch)+1), history.history['sparse_categorical_accuracy'], label='Train Accuracy')
plot_graph(range(1, len(history.epoch)+1), history.history['val_sparse_categorical_accuracy'], label='Validation Accuracy')
plt.legend()
plt.savefig(output_dir / "normal_model_Accuracy.png")


plt.figure(figsize=(10, 6))
plot_graph(range(1, len(history.epoch)+1), history.history['loss'], label='Train loss')
plot_graph(range(1, len(history.epoch)+1), history.history['val_loss'], label='Validation loss')
plt.legend()
plt.savefig(output_dir / "normal_model_loss.png")

# saving metric values to text file

txt_file_path = output_dir / (experiment_name + ".txt")
with open(txt_file_path.as_posix(), "w") as handle:
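
Here (and in Examples #16 and #20 below) plot_graph is called like plt.plot, with x values, y values and a label keyword. A minimal sketch of such a wrapper, assuming it just draws one labelled line on the current matplotlib figure:

import matplotlib.pyplot as plt

def plot_graph(x, y, label=None):
    """Assumed thin wrapper around plt.plot used by the Keras/TFF examples."""
    plt.plot(x, y, label=label)
    plt.grid(True)  # the grid is a guess; the original wrapper may differ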
Code Example #9
File: simulation.py  Project: RuralCat/task_sim
        # set param
        sim.set_param(param_name, param)
        # run
        sim.run()
        # get analysis information
        mean_time, completion_in24_rate, os_work_time, ss_work_time, \
        mean_os_work_time, mean_ss_work_time = sim.logging()
        # save to list
        mean_time_l.append(mean_time)
        completion_in24_rate_l.append(completion_in24_rate)
        mean_os_work_time_l.append(mean_os_work_time)
        mean_ss_work_time_l.append(mean_ss_work_time)
        # save logging
        sim.save_logging('mean task aging', mean_time)
        sim.save_logging('task completion rate in 24 hours', completion_in24_rate)
        sim.save_logging('outer sourcing work time', os_work_time)
        sim.save_logging('self supporting work time', ss_work_time)
        sim.save_logging('mean outer sourcing work time', mean_os_work_time)
        sim.save_logging('mean self supporting work time', mean_ss_work_time)
        sim.save_logging()
    # save list
    sim.save_logging('list res')
    sim.save_logging('mean task aging list', mean_time_l)
    sim.save_logging('task completion rate list', completion_in24_rate_l)
    sim.save_logging('mean outer sourcing work time list', mean_os_work_time_l)
    sim.save_logging('mean self supporting work time list', mean_ss_work_time_l)
    # plot
    plot_graph(params, title_name, mean_time_l, completion_in24_rate_l,
               mean_os_work_time_l, mean_ss_work_time_l, save_path + '.jpg')

Code Example #10
    def run(self, training_data, testing_data, fold_num):
        """Método principal de execução do multilayer perceptron"""
        fold_num = fold_num + 1

        files = ('config', 'error')

        for file in files:
            file_command = '{output}{file}-{fold}.txt'.format(file=file, fold=fold_num,
                output=self.output_directory)

            if file == 'config':
                self.config_f = open(file_command, "w")
                self.config_write()
            elif file == 'error':
                self.error_f = open(file_command, "w")

        # Shuffle the lists of files used for training and testing
        random.shuffle(training_data)
        random.shuffle(testing_data)

        self.start_fold = datetime.now()

        self.error_f.write("Execucao em {} \n\n".format(time.strftime("%d/%m/%Y %H:%M")))
        print ("\nK-Fold with max {} epochs started at: {}\n".format(self.epochs,
            self.start_fold.strftime("%Y-%m-%d %H:%M:%S")))

        for epoch_current in range(self.epochs):
            # u.print_title_epoch(epoch_current + 1, fold_num, 'training',
            #     self.part_2, self.descriptor)

            # train on 4/5 of the fold
            for image_i, image in enumerate(training_data):
                self.training(image, image_i, epoch_current + 1, fold_num)

            # average training error
            self.error_training_avg = self.error_training_avg / self.training_number

            # serialize this epoch's weights (model.dat)
            u.serialize_model(fold_num, self.weights_0, self.weights_1, self.output_directory)

            # test on 1/5 of the fold
            # u.print_title_epoch(epoch_current + 1, fold_num,
            #     'testing', self.part_2, self.descriptor)

            for image_i, image in enumerate(testing_data):
                self.testing(image, image_i)

            # average test error
            self.error_test_avg = self.error_test_avg / self.test_number

            # save this epoch's mean squared error
            self.errors_test_avg_list.append(self.error_test_avg)

            # update the list of test errors
            self.errors_test_list.append(self.error_test_avg)
            # u.error_list_update(self.error_test_avg, self.errors_list)

            # update the list of training errors
            self.errors_training_list.append(self.error_training_avg)

            # record the mean squared errors
            self.error_f.write("{};{};{}\n".format(epoch_current, self.error_training_avg,
             self.error_test_avg))

            # reset the mean squared error averages to 0 for the next epoch
            self.error_training_avg = 0
            self.error_test_avg = 0
            self.test_number = 0
            self.training_number = 0

            # learning rate update and learning-rate-based stopping condition
            if self.alpha > 0.001:
                self.alpha = 0.9 * self.alpha

            # error-based stopping condition
            stop_condition = u.stop_condition(self.errors_test_list, epoch_current, self.alpha)
            if stop_condition['result']:
                break

        self.get_confusion_matrix_and_accuracy(fold_num)

        # overall mean
        mean_total = np.mean(self.errors_test_avg_list)

        # standard deviation
        std_dev = np.std(self.errors_test_avg_list)

        self.config_f.write("epoca_final: {}\n".format(stop_condition['message']))
        self.config_f.write("epoca_final: {}\n".format(epoch_current))
        self.config_f.write("media_total: {}\n".format(mean_total))
        self.config_f.write("desvio_padrao: {}\n".format(std_dev))

        self.end_fold = datetime.now()

        print ("\nK-Fold {}/5:\tMax Epoch (s):  \t{}\tStart Time:\t{}".format(fold_num, self.epochs,
            self.start_fold.strftime("%Y-%m-%d %H:%M:%S")))

        print ("K-Fold {}/5:\tTotal Epoch (s):\t{}\tEnd Time:\t{}".format(fold_num,
            epoch_current + 1,
            self.end_fold.strftime("%Y-%m-%d %H:%M:%S")))

        print ("K-Fold {}/5:\t\t\t\t\tRun. Time:\t{}\n".format(fold_num,
            self.end_fold - self.start_fold))

        self.config_f.close()
        self.error_f.close()
        u.plot_graph(fold_num, self.errors_test_list, self.errors_training_list, self.output_directory)
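
u.stop_condition returns a dict with a boolean 'result' and a 'message'. A sketch of an early-stopping rule consistent with that interface; the project's actual criteria (patience, thresholds) may differ:

def stop_condition(errors_test_list, epoch_current, alpha, patience=5):
    """Illustrative early stopping on test error and learning rate."""
    if alpha <= 0.001:
        return {'result': True, 'message': 'learning rate below threshold'}
    if len(errors_test_list) > patience and \
            min(errors_test_list[-patience:]) >= min(errors_test_list[:-patience]):
        return {'result': True, 'message': 'no improvement in %d epochs' % patience}
    return {'result': False, 'message': 'still improving at epoch %d' % epoch_current}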
Code Example #11
File: main.py  Project: tanouch/robust_gan_github
def main(args):
    # reproducibility
    # ---------------
    path = str(args['seed']) + "/"
    if (not os.path.isdir(path)):
        os.mkdir(path)

    np.random.seed(args['seed'])
    tf.set_random_seed(args['seed'])
    # ---------------

    # parameters
    # ---------------
    dim = args['dim']
    distrib_name = args['distribution']
    data_size = args['data_size']
    # ---------------

    # create dataset
    # ---------------
    # TODO train/tests split
    distribution = distrib.create_distribution(name=distrib_name, dim=dim)
    distribution.create_dataset(data_size)

    fig = plt.figure(figsize=(25, 13))
    ax = plt.subplot(1, 1, 1)
    ax.scatter(distribution.Xtrain[:, 0],
               distribution.Xtrain[:, 1],
               c=distribution.Ytrain,
               cmap='bwr')
    # ---------------

    with tf.Session() as session:
        # create invnet/gan
        # ---------------
        invnet = InvNet(dim=dim, depth=args['inv_depth'], name='invnet')
        invnet_cpy = InvNet(dim=dim,
                            depth=args['inv_depth'],
                            name='invnet_copy')

        llog_optimizer = create_estimator(gd_estimator='llog',
                                          generator=invnet,
                                          lr=args['gen_lr'])

        pd_optimizer = create_estimator(gd_estimator='pd',
                                        generator=invnet_cpy,
                                        lr=args['gen_lr'],
                                        bsz=1000)

        # ---------------
        # create classifier
        # ---------------
        def init_classic_mlp(l2_param, gp_param):
            return MlpBinaryClassifier(input_dim=dim,
                                       depth=args['classif_depth'],
                                       width=args['classif_width'],
                                       facq=tf.nn.tanh,
                                       lr=args['classif_lr'],
                                       l2_param=l2_param,
                                       gp_param=gp_param,
                                       name='rcmlp')

        mlp = init_classic_mlp(l2_param=0., gp_param=0.)
        base_mlp = init_classic_mlp(l2_param=0., gp_param=0.)
        l2_mlp = init_classic_mlp(l2_param=1., gp_param=0.0)
        gp_mlp = init_classic_mlp(l2_param=0., gp_param=0.2)

        # init
        # ---------------
        session.run(tf.global_variables_initializer())
        # ---------------

        # Initial training of invertible network
        # ---------------
        print('Training Invertible Neural Network..')
        train_invnet_llog(session,
                          invnet=invnet,
                          llog_optimizer=llog_optimizer,
                          X=distribution.Xtrain,
                          iters=1000)

        def train_classic_mlp(mlp):
            mlp._train(session=session,
                       X=distribution.Xtrain,
                       Y=distribution.Ytrain,
                       distribution=distribution,
                       iters=30000)

        print("Train baseline")
        train_classic_mlp(base_mlp)
        print("Train with l2 reg")
        train_classic_mlp(l2_mlp)
        print("Train with gradient penalty")
        train_classic_mlp(gp_mlp)
        print("")

        # decision function
        def f_cl(cl):
            def f(x):
                x = np.reshape(x, [-1, dim])
                feed_dict = {cl.x: x}
                return session.run(cl.y, feed_dict)

            return f

        base_f, l2_f, gp_f = f_cl(base_mlp), f_cl(l2_mlp), f_cl(gp_mlp)

        # Optimization
        # ---------------
        invnet_cpy.copy(session, invnet)
        robust_mlp_eps_acc = list()
        for i in range(15):
            # sample points from invnet
            x_invnet, _ = invnet_cpy.sample(session, 1000)
            y_invnet = distribution.knn(x_invnet, 10)
            scatter = ax.scatter(x_invnet[:, 0],
                                 x_invnet[:, 1],
                                 c=y_invnet,
                                 alpha=0.2,
                                 cmap='bwr')

            # train classifier under invnet distribution
            print('Train robust classifier under invnet distribution')
            accs = mlp._train(session=session,
                              X=x_invnet,
                              Y=y_invnet,
                              distribution=distribution,
                              iters=2500)
            robust_mlp_eps_acc.append(accs)

            robust_f = f_cl(mlp)
            ut.plot_graph(path, ax, scatter, distribution,
                          [base_f, l2_f, gp_f, robust_f], i)

            # adversarial training of the invnet
            print("Train invnet..")
            invnet_cpy.copy(session, invnet)
            train_invnet_pd(session=session,
                            invnet=invnet_cpy,
                            disc=mlp,
                            distribution=distribution,
                            pd_optimizer=pd_optimizer,
                            iters=10)
            print("")
Code Example #12
def solver(algorithm, k_coloring, number_of_nodes, max_steps, number_of_runs):
    """Map coloring problem solver"""
    if k_coloring != 3 and k_coloring != 4:
        k_coloring = 4

    print("Solving for these parameters: ", "Algorithm=", algorithm,
          ", Number of colors=", k_coloring, ", Number of nodes=",
          number_of_nodes, "\nNumber of runs=", number_of_runs)
    if algorithm == "mc":
        print("Max Steps used by min-conflicts algorithm=", max_steps)

    # Variables used to gather statistics
    time_sum = 0
    solution_found_count = 0

    # Main loop for each run
    for i in range(number_of_runs):
        # Generate a random graph.
        graph, pos, edges = generate_random_graph(number_of_nodes)
        if number_of_runs == 1:
            # Plot the graph only if the number of runs is 1
            plot_graph(pos, edges, number_of_nodes, False, k_coloring, [])

        # Choose an algorithm to solve the graph
        # 1-Backtracking Algorithm
        if algorithm == "bt":
            start_time = time.monotonic()
            solution_exits, answer = backtracking(number_of_nodes, graph,
                                                  k_coloring)
            end_time = time.monotonic()
            time_sum += (end_time - start_time)
            if solution_exits:
                solution_found_count += 1
                if number_of_runs == 1:
                    print("Color Assignment", answer)
                    plot_graph(pos, edges, number_of_nodes, True, k_coloring,
                               answer)
            else:
                if number_of_runs == 1:
                    print("No Solution exists.")
        # 2-Min-conflicts Algorithm
        elif algorithm == "mc":
            start_time = time.monotonic()
            answer = min_conflicts(graph, number_of_nodes, k_coloring,
                                   max_steps)
            end_time = time.monotonic()
            time_sum += (end_time - start_time)
            if answer:
                solution_found_count += 1
                if number_of_runs == 1:
                    print("Color Assignment", answer)
                    plot_graph(pos, edges, number_of_nodes, True, k_coloring,
                               answer)
            else:
                if number_of_runs == 1:
                    print("No Solution exists.")
        # 3-Backtracking with forward checking
        elif algorithm == "bt-fc":
            start_time = time.monotonic()
            solution_exits, answer = backtracking_with_forward_checking(
                number_of_nodes, graph, k_coloring)
            end_time = time.monotonic()
            time_sum += (end_time - start_time)
            if solution_exits:
                solution_found_count += 1
                answer = modify_answer_format(answer, number_of_nodes)
                if number_of_runs == 1:
                    print("Color Assignment", answer)
                    plot_graph(pos, edges, number_of_nodes, True, k_coloring,
                               answer)
            else:
                if number_of_runs == 1:
                    print("No Solution exists.")
        # 4-Backtracking with mac
        elif algorithm == "bt-mac":
            start_time = time.monotonic()
            solution_exits, answer = backtracking_with_mac(
                number_of_nodes, graph, k_coloring)
            end_time = time.monotonic()
            time_sum += (end_time - start_time)
            if solution_exits:
                solution_found_count += 1
                answer = modify_answer_format(answer, number_of_nodes)
                if number_of_runs == 1:
                    print("Color Assignment", answer)
                    plot_graph(pos, edges, number_of_nodes, True, k_coloring,
                               answer)
            else:
                if number_of_runs == 1:
                    print("No Solution exists.")

    # Display statistics if number of runs is more than one
    if number_of_runs > 1:
        avg_runtime = time_sum / number_of_runs
        percentage_of_finding_a_solution = solution_found_count / number_of_runs
        print("Average Run Time = ", avg_runtime,
              ", Percentage of finding a solution = ",
              percentage_of_finding_a_solution * 100, "%")
    if number_of_runs == 1:
        print("Run Time = ", time_sum)
Code Example #13
def get_metrics_save_table(comparison_table, json_file_path, args, decimal_places, data_for_current_row, 
						   table, output_folder, time_taken, crf_or_preset=None):
	with open(json_file_path, 'r') as f:
		file_contents = json.load(f)

	# Get the VMAF score of each frame from the JSON file created by libvmaf.
	vmaf_scores = [frame['metrics']['vmaf'] for frame in file_contents['frames']]

	# Calculate the mean, minimum and standard deviation.
	mean_vmaf = force_decimal_places(np.mean(vmaf_scores), decimal_places)
	min_vmaf = force_decimal_places(min(vmaf_scores), decimal_places)
	vmaf_std = force_decimal_places(np.std(vmaf_scores), decimal_places)

	frame_numbers = [frame['frameNum'] for frame in file_contents['frames']]

	plot_graph(f'VMAF\nn_subsample: {args.subsample}', 'Frame Number', 'VMAF', frame_numbers,
			   vmaf_scores, os.path.join(output_folder, 'VMAF'))

	# Add the VMAF values to the table.
	data_for_current_row.append(f'{min_vmaf} | {vmaf_std} | {mean_vmaf}')

	ssim_string = ''
	psnr_string = ''
	
	if args.calculate_ssim:
		ssim_string = '/SSIM'
		# Get the SSIM score of each frame from the JSON file created by libvmaf.
		ssim_scores = [ssim['metrics']['ssim'] for ssim in file_contents['frames']]

		mean_ssim = force_decimal_places(np.mean(ssim_scores), decimal_places)
		min_ssim = force_decimal_places(min(ssim_scores), decimal_places)
		ssim_std = force_decimal_places(np.std(ssim_scores), decimal_places) # Standard deviation.
	
		log.info(f'Creating SSIM graph...')
		plot_graph(f'SSIM\nn_subsample: {args.subsample}', 'Frame Number', 'SSIM', frame_numbers,
			       ssim_scores, mean_ssim, os.path.join(output_folder, 'SSIM'))

		# Add the SSIM values to the table.
		data_for_current_row.append(f'{min_ssim} | {ssim_std} | {mean_ssim}')

	if args.calculate_psnr:
		psnr_string = '/PSNR'
		# Get the PSNR score of each frame from the JSON file created by libvmaf.
		psnr_scores = [psnr['metrics']['psnr'] for psnr in file_contents['frames']]

		mean_psnr = force_decimal_places(np.mean(psnr_scores), decimal_places)
		min_psnr = force_decimal_places(min(psnr_scores), decimal_places)
		psnr_std = force_decimal_places(np.std(psnr_scores), decimal_places) # Standard deviation.

		log.info(f'Creating PSNR graph...')
		plot_graph(f'PSNR\nn_subsample: {args.subsample}', 'Frame Number', 'PSNR', frame_numbers,
				   psnr_scores, mean_psnr, os.path.join(output_folder, 'PSNR'))

		# Add the PSNR values to the table.
		data_for_current_row.append(f'{min_psnr} | {psnr_std} | {mean_psnr}')

	if not args.no_transcoding_mode:
		data_for_current_row.insert(0, crf_or_preset)
		data_for_current_row.insert(1, time_taken)
	
	table.add_row(data_for_current_row)
	table_title = f'VMAF{ssim_string}{psnr_string} values are in the format: Min | Standard Deviation | Mean'

	# Write the table to the Table.txt file.
	with open(comparison_table, 'w') as f:
		f.write(table.get_string(title=table_title))

	log.info(f'{comparison_table} has been updated.')
	line()
	return float(mean_vmaf)
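
force_decimal_places is used throughout Examples #5 and #13 to format numbers, and the return value of get_metrics_save_table is later converted with float(mean_vmaf), so it presumably returns a string with a fixed number of decimal places. A one-line sketch under that assumption:

def force_decimal_places(value, decimal_places):
    """Assumed helper: format a number with exactly `decimal_places` decimals."""
    return f'{value:.{decimal_places}f}'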
Code Example #14
def create_quality_score_graph(python_filename):
    '''
    Creates a quality score graph from the information
    in the (python_filename).score.csv file,
    created by the lint function in lint.py.
    
    The (python_filename).score.csv file is first
    opened and the contents stored in the variable
    list_of_scores.
    
    Two empty lists are created to hold the elements of the
    list of scores: x_axis_ticks will contain the date and
    time values from the csv file, while each_score will
    contain each score from the file.
    
    The for loop moves this data into the respective lists.
    Only the last 20 results should appear in the graph, so
    the loop iterates over list_of_scores[-20:].
    
    We next call the function plot_graph from the utils.py
    program.
    
        1.    each_score goes into the y-variable location
    
        2.    x_axis_ticks goes into the x-variable location
           
        3.    we want the y axis max to be 10, since that is the most
              the score can go up to
    
        4.    the x axis title is 'Date and Time'
    
        5.    the y axis title is 'Score (out of 10)'
    
        6.    the title of the whole graph is 'Quality Score Graph'
    
        7.    the filename will be python_filename[:-2] + 'history.svg'
    
    Parameters:
    
    python_filename: The name of the python file you want to create
    a quality score graph for.
    
    Example:
    
    python_filename = 'naughty.py'
    >>>None
    
    creates a svg file called naughty.history.svg
    
    '''
    # Reads the contents of the .score.csv file and puts it into
    # a list
    with open(python_filename[:-2] + 'score.csv', 'rb') as contents:
        reader = csv.reader(contents)
        list_of_scores = list(reader)
    
    x_axis_ticks = []
    each_score = []
    
    # Takes the last 20 scores and appends the contents into
    # the two empty lists x_axis_ticks and each_score
    for each_line in list_of_scores[-20:]:
        x_axis_ticks.append(each_line[0])
        each_score.append(each_line[1])

    # Calls the plot_graph function from utils.py
    plot_graph(each_score, x_axis_ticks, 10, 'Date and Time',
               'Score (out of 10)' , 'Lint score history for ' +
               python_filename, python_filename[:-2] + 'history.svg')
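
The docstring above spells out the expected parameter order of plot_graph: y values, x-axis ticks, y-axis maximum, x-axis title, y-axis title, graph title, output filename. A hedged matplotlib sketch with that signature, saving the figure as an SVG (the project's utils.py may use a different plotting library):

import matplotlib.pyplot as plt

def plot_graph(y_values, x_axis_ticks, y_max, x_title, y_title, graph_title, filename):
    """Illustrative implementation matching the documented parameter order."""
    fig, ax = plt.subplots(figsize=(10, 6))
    positions = range(len(y_values))
    ax.plot(positions, [float(v) for v in y_values], marker='o')  # scores arrive as strings from the csv
    ax.set_xticks(list(positions))
    ax.set_xticklabels(x_axis_ticks, rotation=45, ha='right')
    ax.set_ylim(0, y_max)
    ax.set_xlabel(x_title)
    ax.set_ylabel(y_title)
    ax.set_title(graph_title)
    fig.tight_layout()
    fig.savefig(filename)  # a .svg filename makes matplotlib write an SVG
    plt.close(fig)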
Code Example #15
def main():
    '''
    Train function.
    '''
    os.environ[
        'TORCH_HOME'] = t_cfg.MODEL_PATH  #set the env variable so the model is downloaded inside this folder.

    ########################################################## Model Initialization & Loading ##########################################################
    model_instance = model.Model(model_download_path=t_cfg.MODEL_PATH,
                                 new_model_name=t_cfg.MODEL_NAME,
                                 input_feature_size=t_cfg.FEATURE_INPUT_SIZE,
                                 num_class=t_cfg.NUM_CLASSES)

    vgg_model = model_instance()

    optimizer = Adam(vgg_model.parameters(),
                     lr=t_cfg.LEARNING_RATE)  #optimizer
    lr_decay = lr_scheduler.ExponentialLR(
        optimizer, gamma=t_cfg.LR_DECAY_RATE
    )  #scheduler is used to lower the learning rate during training later.
    loss_criterion = torch.nn.CrossEntropyLoss()  #loss function.

    vgg_model = vgg_model.to(
        t_cfg.DEVICE)  #move the network to GPU if available.

    print("--- Model Architecture ---")
    print(vgg_model)

    ########################################################## Data Initialization & Loading ##########################################################
    #Initialize the training data class.
    training_data = LoadDataset(
        resized_image_size=t_cfg.RESIZED_IMAGE_SIZE,
        total_images=t_cfg.TOTAL_DATA,
        classes=t_cfg.CLASSES,
        data_list=t_cfg.IMG_LABEL_LIST,
        transform=transforms.Compose([
            RandomRotate(angle_range=t_cfg.ROTATION_RANGE,
                         prob=t_cfg.ROTATION_PROB),
            RandomShear(shear_range=t_cfg.SHEAR_RANGE, prob=t_cfg.SHEAR_PROB),
            RandomHorizontalFlip(prob=t_cfg.HFLIP_PROB),
            RandomVerticalFlip(prob=t_cfg.VFLIP_PROB),
            RandomNoise(mode=t_cfg.NOISE_MODE, prob=t_cfg.NOISE_PROB),
            ToTensor(mode='training')
        ]))

    dataloader = DataLoader(training_data,
                            batch_size=t_cfg.BATCH_SIZE,
                            shuffle=t_cfg.DATA_SHUFFLE,
                            num_workers=t_cfg.NUM_WORKERS)

    ########################################################## Model Training & Saving ##########################################################
    best_accuracy = 0

    entire_loss_list = []
    entire_accuracy_list = []

    for epoch_idx in range(t_cfg.EPOCH):

        print("Training for epoch %d has started!" % (epoch_idx + 1))

        epoch_training_loss = []
        epoch_accuracy = []
        i = 0
        for i, sample in tqdm(enumerate(dataloader)):

            batch_x, batch_y = sample['image'].to(
                t_cfg.DEVICE), sample['label'].to(t_cfg.DEVICE)

            optimizer.zero_grad(
            )  #clear the gradients in the optimizer between every batch.

            net_output = vgg_model(batch_x)  #output from the network.

            total_loss = loss_criterion(input=net_output, target=batch_y)

            epoch_training_loss.append(
                total_loss.item())  #append the loss of every batch.

            total_loss.backward()  #calculate the gradients.
            optimizer.step()

            batch_acc = calculate_accuracy(network_output=net_output,
                                           target=batch_y)
            epoch_accuracy.append(batch_acc.cpu().numpy())

        lr_decay.step()  #decay rate update
        curr_accuracy = sum(epoch_accuracy) / (i + 1)  #i is the last batch index, so divide by the batch count.
        curr_loss = sum(epoch_training_loss)

        print("The accuracy at epoch %d is %g" % (epoch_idx, curr_accuracy))
        print("The loss at epoch %d is %g" % (epoch_idx, curr_loss))

        entire_accuracy_list.append(curr_accuracy)
        entire_loss_list.append(curr_loss)

        if curr_accuracy > best_accuracy:

            torch.save(vgg_model.state_dict(), t_cfg.SAVE_PATH)
            best_accuracy = curr_accuracy
            print("Model is saved !")

    ########################################################## Graphs ##########################################################
    if t_cfg.PLOT_GRAPH:
        plot_graph(t_cfg.EPOCH, "Epoch", "Training Loss",
                   "Training Loss for %d epoch" % (t_cfg.EPOCH), "./loss.png",
                   [entire_loss_list, 'r--', "Loss"])
        plot_graph(t_cfg.EPOCH, "Epoch", "Training Accuracy",
                   "Training Accuracy for %d epoch" % (t_cfg.EPOCH),
                   "./accuracy.png", [entire_accuracy_list, 'b--', "Accuracy"])
Code Example #16
metric_collection = {
    "sparse_categorical_accuracy": tff_train_acc,
    "val_sparse_categorical_accuracy": tff_val_acc,
    "loss": tff_train_loss,
    "val_loss": tff_val_loss
}

if eval_model:
    eval_model.save(model_dir / (experiment_name + ".h5"))
else:
    print("training didn't started")
    exit()

fig = plt.figure(figsize=(10, 6))
plot_graph(list(range(1, 26))[4::5], tff_train_acc, label='Train Accuracy')
plot_graph(list(range(1, 26))[4::5], tff_val_acc, label='Validation Accuracy')
plt.legend()
plt.savefig(output_dir / "federated_model_Accuracy.png")

plt.figure(figsize=(10, 6))
plot_graph(list(range(1, 26))[4::5], tff_train_loss, label='Train loss')
plot_graph(list(range(1, 26))[4::5], tff_val_loss, label='Validation loss')
plt.legend()
plt.savefig(output_dir / "federated_model_loss.png")

# saving metric values to text file

txt_file_path = output_dir / (experiment_name + ".txt")
with open(txt_file_path.as_posix(), "w") as handle:
    content = []
Code Example #17
File: model.py  Project: 0x17/VRP
def parse_results(inst, Xijm):
    tours = [[(i, j) for i in inst.I for j in inst.I if Xijm[i][j][m].x == 1.0] for m in inst.M]
    print(tours)
    edges = [edge for m in inst.M for edge in tours[m]]
    utils.plot_graph(inst.I, edges, inst.vertex_labels(), inst.edge_labels(edges), 'solution')
Code Example #18
    header1=["UAID","IP","Host"]
    temp=1
    for v in data.itervalues():
        if len(v) > temp:
             temp=len(v)
    
    for i in range(temp):
        header1.extend(["Diff"])
    utils.convert_csv(data=data, header=header1,filename=filename)
     
    utils.sort_csv(filename=filename)
    
#    avg.append(utils.calculate_avg(filename=filename))
    avg.extend( utils.calculate_avg(filename=filename))
    node_avg.append(utils.calculate_node_avg(filename=filename))
    utils.plot_graph(filename=filename,ip=ip)
#    files.append(filename)
    print(files)


#print len(avg)
#print node_avg
filename=utils.sort_node_based(node_avg)
header=["Node","IPv4 Time","IPv4 UAID Count","IPv6 Time","IPv6 UAID Count"]
files.extend([filename,'/mnt/push_test/Device_IPv4.png','/mnt/push_test/Device_IPv6.png','/mnt/push_test/diff_Device_IPv4.csv','/mnt/push_test/diff_Device_IPv6.csv'])
print("############################################")
print(files)

f= open("/mnt/push_test/message.txt","w+")
f.write("Hi, \n\nAverage connected time for IPv4 devices: " + str(round(avg[0],2))+" minutes" +"\n\n" + "Total IPv4 Devices: " + str(avg[1]) +"\n\n" + "Average Connected time for IPv6 devices: "+ str(round(avg[2],2))+" minutes"+"\n\n" +"Total IPv6 Devices: "+str(avg[3])+"\n\nTime Duration: "+str(before)+" to "+str(now)+"\n\nDownload report from below URL:\n\n"+"http://10.144.182.51/Report/"+"\n\nRegards, \nElastic Stack")
#
Code Example #19
File: instance.py  Project: 0x17/VRP
def visualize_instance(inst: Instance):
    utils.plot_graph(inst.I, inst.undirected_edges(), inst.vertex_labels(),
                     inst.edge_labels(inst.undirected_edges()), 'instance',
                     'graph')
Code Example #20
metric_file_2 = this_dir / "results" / experiment_name / method_2 / (experiment_name + ".txt")

output_dir = this_dir / "results" / experiment_name / "compare"
if not output_dir.exists():
    output_dir.mkdir(parents=True)


metric_1 = {}
with open(metric_file_1, "r") as f:
    data = f.read()
    for line in data.split("\n"):
        line = line.split(" ")
        metric_1[line[0]] = [float(l) for l in line[1:]]

metric_2 = {}
with open(metric_file_2, "r") as f:
    data = f.read()
    for line in data.split("\n"):
        line = line.split(" ")
        metric_2[line[0]] = [float(l) for l in line[1:]]

common_metrics = set(metric_1.keys()).intersection(set(metric_2.keys()))
print("Common metrics : ", common_metrics)

for m in common_metrics:
    plt.figure(figsize=(10, 6))
    plt.suptitle(m, fontsize=15)
    plot_graph(range(1, len(metric_1[m]) + 1), metric_1[m], label=method_1)
    plot_graph(list(range(1, len(metric_2[m])*5 + 1))[4::5], metric_2[m], label=method_2)
    plt.legend()
    plt.savefig(output_dir / (m + ".png"))