Code example #1
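Per-step logging and plotting while the Baidu agent runs on the ImageNet test set; every fifth iteration the figure, the image-reference cache, and the running log are checkpointed with rm.save.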
        print(idx, end='\t\t')
        for k, v in log_dict.items():
            if k not in [
                    'image_path', 'initial_reward', 'initial_action',
                    'ref_size', 'upload_size'
            ]:
                print("%s:%.2f" % (k, v), end='\t')
        print('\n')

        plot_keys = [
            'accuracy', 'size_reward', 'reward', 'action', 'recent_accuracy',
            'upload_size', 'agent_epsilon', 'agent_accuracy', 'recent_reward'
        ]
        plot_durations(np.array(
            [running_agent.running_log[key] for key in plot_keys]),
                       title_list=plot_keys)

        if idx % 5 == 0:
            plt.savefig('evaluation_results/testset_imagenet_baidu.png',
                        dpi=100)

            with open('evaluation_results/image_reference_cache.defaultdict',
                      'wb') as f:
                pickle.dump(ref_cache, f)

            rm.save(running_agent.running_log,
                    name='testset_imagenet_baidu',
                    topic="AgentRetrain",
                    comment="baidu agent on imagenet testset",
                    replace_version='latest')
Code example #2
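Tail of a batched evaluation loop: per-batch accuracy and average upload size are computed, accumulated into evaluation_result, and finally saved as 'Q_agent_eval' (per the save comment, an evaluation of the Q agent with reference Q=75 on 3K images).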
                                               y=np.expand_dims(label, axis=0),
                                               verbose=2)[1]
            acc_list.append(accuracy)
            size_list.append(size)
            banchmark_size.append(ref_size)

        batch_acc = np.sum(acc_list) / len(acc_list)
        batch_aver_size = np.mean(size_list)

        print("batch %s\taccuray %.2f\taverage size %.2f" %
              (idx + 1, batch_acc, batch_aver_size))
        evaluation_result['accuracies'].append(batch_acc)
        evaluation_result['batch_sizes'].append(batch_aver_size)
        evaluation_result['sizes'] += size_list
        evaluation_result['bm_sizes'] += banchmark_size

    # except Exception as e:
    #     print(e)
    #     print("Exit...")
    # finally:
    #     print("saving...")
    #     if not EVALUATION:
    #         agent.save_params("Agent_params/")
    rm.save(
        evaluation_result,
        name='Q_agent_eval',
        topic='AgentTest',
        comment="evaluation result on 3K of Q agent(ref Q=75)",
        # replace_version='latest',
    )
Code example #3
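A checkpointing variant of example #1 with the plotting calls disabled; this run pickles the reference cache to 'aws_initial_DNIM.defaultdict' and saves the running log under the name 'aws_initial_DNIM'.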
        # plot_durations(np.array([running_agent.running_log[key] for key in plot_keys]),
        #                title_list=plot_keys)

        if idx % 5 == 0:
            # plt.savefig('evaluation_results/running_retrain_fullDNIM.png', dpi=100)

            # Reference-cache files used in earlier runs:
            #   image_reference_cache_face_initial.defaultdict
            #   image_reference_cache_face_retrain_DNIM.defaultdict
            #   image_reference_cache_baidu_retrain_DNIM.defaultdict
            #   image_reference_cache_baidu_initial_DNIM.defaultdict
            #   baidu_initial_DNIM_retrain_imagenet.defaultdict
            with open('evaluation_results/aws_initial_DNIM.defaultdict',
                      'wb') as f:
                pickle.dump(ref_cache, f)

            rm.save(
                running_agent.running_log,
                # Run names used in earlier experiments: DNIM_agent_on_imagenet,
                # DNIM_agent_initial, DNIM_agent_retrain, DNIM_agent_retrain_baidu,
                # baidu_initial_DNIM_retrain_imagenet
                name='aws_initial_DNIM',
                topic="AgentRetrain",
                comment="retrain running log with 0.5 exploration rate",
                replace_version='latest')
Code example #4
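Another checkpointing variant accumulated over many experiments: the active code pickles the cache to 'baidu_all_DNIM_imagenet.defaultdict' and saves the running log under 'mechanism_train_2k_FLIR'; names and cache files from earlier runs are kept as comments.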
            # Reference-cache files used in earlier runs:
            #   image_reference_cache_baidu_retrain_DNIM.defaultdict
            #   image_reference_cache_baidu_initial_DNIM.defaultdict
            #   baidu_initial_DNIM_retrain_imagenet.defaultdict
            #   baidu_imagenet_model_inference_DNIM.defaultdict
            with open('evaluation_results/baidu_all_DNIM_imagenet.defaultdict',
                      'wb') as f:
                pickle.dump(ref_cache, f)

            rm.save(
                running_agent.running_log,
                # Run names used in earlier experiments: DNIM_agent_on_imagenet,
                # DNIM_agent_initial, DNIM_agent_retrain, DNIM_agent_retrain_baidu,
                # baidu_initial_DNIM_retrain_imagenet,
                # baidu_inference_DNIM_retrain_imagenet,
                # baidu_imagenet_model_inference_DNIM,
                # baidu_imagenet_model_inference_DNIM_code, baidu_all_DNIM_model,
                # baidu_all_DNIM_model_inference, baidu_DNIM_two_imagenet_DNIM2,
                # baidu_DNIM_two_imagenet_load_DNIM1, baidu_FLIR_two_imagenet_FLIR3
                name='mechanism_train_2k_FLIR',
                topic="AgentRetrain",
                comment="retrain running log with 0.5 exploration rate",
                replace_version='latest')
Code example #5
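Same logging/plotting/checkpointing pattern as example #1, here with the additional 'explor_rate' plot key and the running log saved as 'DNIM_agent_on_imagenet'.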
        for k, v in log_dict.items():
            if k not in [
                    'image_path', 'initial_reward', 'initial_action',
                    'ref_size', 'upload_size'
            ]:
                print("%s:%.2f" % (k, v), end='\t')
        print('\n')

        plot_keys = [
            'accuracy', 'size_reward', 'reward', 'action', 'recent_accuracy',
            'upload_size', 'agent_epsilon', 'agent_accuracy', 'recent_reward',
            'explor_rate'
        ]
        plot_durations(np.array(
            [running_agent.running_log[key] for key in plot_keys]),
                       title_list=plot_keys)

        if idx % 5 == 0:
            plt.savefig('evaluation_results/running_retrain_fullDNIM.png',
                        dpi=100)

            with open('evaluation_results/image_reference_cache.defaultdict',
                      'wb') as f:
                pickle.dump(ref_cache, f)

            rm.save(running_agent.running_log,
                    name='DNIM_agent_on_imagenet',
                    topic="AgentRetrain",
                    comment="retrain running log with 0.5 exploration rate",
                    replace_version='latest')
Code example #6
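End of a training loop: transitions are stored, the agent trains once its replay memory holds enough samples (and again when the episode finishes), and on exit or KeyboardInterrupt the collected accuracy and size records are saved.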
                    sizes['origin'].append(info['average size'])
                    accuracies['origin_id'].append(count)
                    sizes['origin_id'].append(count)

                if not done_flag:
                    agent.store_transition(observation, action, reward,
                                           new_observation)

                if agent.memory.index >= 3 * 64:
                    print("\ttraining...")
                    agent.learn(epochs=10)

                if done_flag:
                    agent.learn(epochs=20)
                    break

                observation = new_observation

    except KeyboardInterrupt:
        print("Exit...")
    finally:
        print("saving...")
        rm.save(accuracies,
                name="accuracies",
                topic="AgentTest",
                comment="all accuracies info")
        rm.save(sizes,
                name="sizes",
                topic="AgentTest",
                comment="all sizes info")
Code example #7
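Tail of a Q-table training run against the Baidu environment: per-step rewards and the exploration rate are averaged and plotted every five steps, the Q-table is printed every 20 steps, and the training log is saved once the loop ends.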
                    if step_count <= 100:
                        agent.curr_exploration_rate = 1
            else:
                break

            plot_part.append(
                np.array([
                    info['acc_r'], info['size_r'], reward,
                    agent.curr_exploration_rate, action
                ]))
            if step_count % 5 == 0:
                plot_y.append(np.mean(plot_part, axis=0))
                plot_durations(np.array(plot_y))

            features = new_features

            if step_count % 20 == 0:
                print(agent.q_table)
                print(np.argmax(agent.q_table, axis=1))

            if step_count >= 380:
                break

        rm.save(
            performance,
            name='training_log',
            topic="AgentTrain",
            comment="Baidu environment's training log",
            # replace_version='latest'
        )
Code example #8
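Measures average JPEG size as a function of quality: one image is sampled per ImageNet class, each sample is re-encoded at Q = 1..100 with size_Q(), and the per-quality size lists are saved through ResultManager. The fragment assumes the usual imports (os, numpy as np, PIL.Image, io.BytesIO) from the surrounding script.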
        for image_name in np.random.choice(os.listdir(
                "%s/%s" % (imagenet_data_path, img_class)),
                                           size=samples_per_class):
            sample_image_path = ("%s/%s/%s" %
                                 (imagenet_data_path, img_class, image_name))

            image_paths.append(sample_image_path)
    return image_paths


def size_Q(img, Q):
    f = BytesIO()
    img.save(f, format="JPEG", quality=Q)
    return len(f.getvalue())


rm = ResultManager('results')
sample_set = gen_sample_set('/home/hsli/imagenet-data/train/', 1)
aver_size_list = []
for i in np.arange(1, 101, 1):
    print("Parsing %s" % i)
    aver_size = [
        size_Q(Image.open(path).convert("RGB"), int(i)) for path in sample_set
    ]
    aver_size_list.append(aver_size)

rm.save(aver_size_list,
        name='aver_size',
        topic='measurements',
        comment="finegrained average size upon different Q on 2W images")
Code example #9
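Builds the JPEG/ResNet50 benchmark: per-image top-5 correctness and file size are recorded for every quality level, per-quality accuracy and mean size curves are derived (reversed to run from Q=1 to Q=100), and both the benchmark and the robust-image subset are saved. max_continous_idx() is a helper not shown in this fragment.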
        loss, top5 = model.evaluate(model_input_data, model_label, verbose=0)
        top5_list.append(int(top5))
        size_list.append(size)
    minimal_stable_q = 100 - max_continous_idx(top5_list)
    measurement_dict['paths'].append(path)
    measurement_dict['top5_upon_q'].append(top5_list)
    measurement_dict['minimal_stable_q'].append(minimal_stable_q)
    measurement_dict['size_lists'].append(size_list)

    if minimal_stable_q < 100:
        robust_dict['paths'].append(path)
        robust_dict['robustness'].append(minimal_stable_q)

top5_data = np.array(measurement_dict['top5_upon_q'])
top5_upon_q_list = []
for i in range(100):
    top5_upon_q_list.append(sum(top5_data[:, i]) / len(top5_data[:, i]))

banchmark['accuracies'] = top5_upon_q_list[::-1]
banchmark['qualities'] = [i + 1 for i in range(100)]
banchmark['sizes'] = np.mean(np.array(measurement_dict['size_lists']),
                             axis=0)[::-1].tolist()

rm.save(banchmark,
        name="Banchmark_JPEG_ResNet50",
        topic="Banchmark",
        comment="accuracies and sizes upon each Q, JPEG and ResNet50 on 5K")
rm.save(robust_dict,
        name="Robustness_JPEG_ResNet50",
        topic="Dataset",
        comment="robust dataset for JPEG and ResNet50(top-5)")
Code example #10
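Loop body for running the agent against the Amazon service on ImageNet: metrics are plotted every 10 steps, the recognition-result cache and the figure are refreshed periodically, the training log is saved every 200 steps, and outside evaluation mode the DQN model is checkpointed as well.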
            # Plot: accumulate per-step metrics and refresh the figure every 10 steps.
            plot_part.append(
                np.array([
                    info['acc_r'], info['size_r'], reward,
                    agent.curr_exploration_rate, action
                ]))
            if step_count % 10 == 0:
                plot_y.append(np.mean(plot_part, axis=0))
                plot_durations(np.array(plot_y))
                # Update the locally cached recognition results.
                env.update_cache(
                    'evaluation_results/image_reference_cache_amazon.defaultdict')

            features = new_features

            if step_count % 20 == 0:
                # Save the current figure so the running result can be inspected.
                plt.savefig('evaluation_results/eval_amazon_imagenet.png',
                            dpi=100)

            if step_count % 200 == 0:
                rm.save(train_log,
                        name='eval_amazon_imagenet',
                        topic="AgentTrain",
                        comment="Train an agent on amazon and ImageNet dataset",
                        replace_version='latest'
                        )

                if not EVALUATION:
                    # Update the RL agent model.
                    agent.model.save(
                        "evaluation_results/agent_DQN_train_amazon_imagenet.h5")

            if step_count >= 1300 and EVALUATION:
                break