            recent_acc = np.mean(self.agent_memory['accuracy'])
        else:
            recent_reward = np.mean(
                self.agent_memory['reward'][-self.recent_zone:])
            recent_acc = np.mean(
                self.agent_memory['accuracy'][-self.recent_zone:])
        return recent_acc, recent_reward


#

if __name__ == '__main__':
    # api = Baidu()
    # api = FacePP()
    api = AmazonRekognition()
    rm = ResultManager('evaluation_results')

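    # Run the agent with a DQN checkpoint for AWS Rekognition on DNIM
    # (agent_DQN_initial_aws_DNIM.h5); banchmark_q=75 appears to be the
    # reference JPEG quality level used for the reward benchmark.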
    running_agent = RunningAgent(
        # dqn_path='evaluation_results/agent_DQN_train_baidu_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_train_face_imagenet.h5',
        # dqn_path='evaluation_results/agent_DQN_train_face_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_retrain_face_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_retrain_baidu_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_initial_baidu_DNIM.h5',
        dqn_path='evaluation_results/agent_DQN_initial_aws_DNIM.h5',
        banchmark_q=75,
        cloud_backend=api,
    )

    # imagenet_paths = _gen_sample_set_imagenet('/home/hsli/gnode02/imagenet-data/train/', 2)
    imagenet_paths = _gen_sample_set_imagenet('/home/imagenet-data/train/', 2)

Example #2
        if len(self.agent_memory['reward']) < self.recent_zone:
            recent_reward = np.mean(self.agent_memory['reward'])
            recent_acc = np.mean(self.agent_memory['accuracy'])
        else:
            recent_reward = np.mean(
                self.agent_memory['reward'][-self.recent_zone:])
            recent_acc = np.mean(
                self.agent_memory['accuracy'][-self.recent_zone:])
        return recent_acc, recent_reward


#

if __name__ == '__main__':
    api = Baidu()
    rm = ResultManager('evaluation_results')

    running_agent = RunningAgent(
        dqn_path='evaluation_results/agent_DQN_train_baidu_imagenet.h5',
        banchmark_q=75,
        cloud_backend=api,
    )

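    # Sample 3 images per ImageNet class and keep the last 500 paths for evaluation.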
    imagenet_paths = _gen_sample_set_imagenet(
        '/home/hsli/gnode02/imagenet-data/train/', 3)[-500:]

    test_image_paths = imagenet_paths

    for idx, path in enumerate(test_image_paths):
        error_code, log_dict = running_agent.agent_upload(path)
        # Skip images for which agent_upload reported an error (positive error code).
        if error_code > 0:
            continue
Example #3
        if len(self.agent_memory['reward']) < self.recent_zone:
            recent_reward = np.mean(self.agent_memory['reward'])
            recent_acc = np.mean(self.agent_memory['accuracy'])
        else:
            recent_reward = np.mean(
                self.agent_memory['reward'][-self.recent_zone:])
            recent_acc = np.mean(
                self.agent_memory['accuracy'][-self.recent_zone:])
        return recent_acc, recent_reward


#

if __name__ == '__main__':
    api = Baidu()
    rm = ResultManager('evaluation_results')

    running_agent = RunningAgent(
        dqn_path='evaluation_results/agent_DQN_train_baidu_DNIM.h5',
        banchmark_q=75,
        cloud_backend=api,
    )

    # imagenet_paths = _gen_sample_set_imagenet('/home/hsli/gnode02/imagenet-data/train/', 2)

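    # `subset` comes from earlier in the original script; it is shuffled and then
    # combined with image paths logged under result entry 7 to form the test stream.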
    np.random.shuffle(subset)
    test_image_paths = subset + rm.load(7)['image_path'][:1200] + rm.load(
        7)['image_path']

    for idx, path in enumerate(test_image_paths):
        error_code, log_dict = running_agent.agent_upload(path)
Example #4
np.random.seed(2)

BATCH_SIZE = 128


def list_split(l, size):
    # Split list `l` into consecutive chunks of at most `size` elements.
    return [l[m:m + size] for m in range(0, len(l), size)]
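# e.g. list_split([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]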


if __name__ == '__main__':
    images_dir = '/home/hsli/gnode02/imagenet-data/train/'

    feature_extractor = load_model(
        'checkpoints/mobilenetv2_predictor_2W_acc_0.6955_epoch50.hdf5')

    rm = ResultManager('results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

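    # Tabular Q-learning agent: 2-dimensional state, 10 discrete actions
    # (presumably JPEG quality levels), with a slow epsilon decay for long exploration.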
    agent = Q_Agent(s_dim=2,
                    a_dim=10,
                    epsilon_decay=0.9993,
                    epsilon_min=0.2,
                    lr=0.1,
                    gamma=0.95)

    step_count = 0

    env = BatchImgEnvironment(imagenet_train_path=images_dir,
                              samples_per_class=3,
                              backbone_model=InceptionV3(),
Example #5
            recent_reward = np.mean(self.agent_memory['reward'])
            recent_acc = np.mean(self.agent_memory['accuracy'])
        else:
            recent_reward = np.mean(
                self.agent_memory['reward'][-self.recent_zone:])
            recent_acc = np.mean(
                self.agent_memory['accuracy'][-self.recent_zone:])
        return recent_acc, recent_reward


#

if __name__ == '__main__':
    api = Baidu()
    # api = FacePP()
    rm = ResultManager('evaluation_results')

    running_agent = RunningAgent(
        # dqn_path='evaluation_results/agent_DQN_train_baidu_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_train_face_imagenet.h5',
        # dqn_path='evaluation_results/agent_DQN_train_face_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_retrain_face_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_retrain_baidu_DNIM.h5.retrain',
        # dqn_path='evaluation_results/agent_DQN_initial_baidu_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_retrain_baidu_DNIM.h5.retrain.retrain_imagenet1',
        # dqn_path='evaluation_results/agent_DQN_baidu_all_DNIM.h5',
        # dqn_path='evaluation_results/agent_DQN_baidu_FLIR.h5',
        dqn_path='evaluation_results/agent_DQN_train_1.6K_baidu.h5',
        banchmark_q=75,
        cloud_backend=api,
    )

Example #6
from keras.utils import multi_gpu_model
from keras.models import Model
from keras import regularizers
from keras.layers import Dense, GlobalAveragePooling2D, Dropout, BatchNormalization, Flatten
from keras.applications.mobilenetv2 import preprocess_input
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import tensorflow as tf
import cv2
import os

tf.set_random_seed(2)
np.random.seed(2)

os.environ["CUDA_VISIBLE_DEVICES"] = "1, 2"

rm = ResultManager('results')

train_size = 19000
test_size = 2000

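# Binary labels from the logged robustness scores: images with a score above 3
# are treated as the positive (robust) class, one-hot encoded as (0., 1.).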
train_img_paths = [
    path.replace('gnode02/', '') for path in rm.load(6)['paths'][:train_size]
]
train_label_data = np.array([(0., 1.) if item > 3 else (1., 0.)
                             for item in rm.load(6)['robustness'][:train_size]
                             ])

test_img_paths = [
    path.replace('gnode02/', '') for path in rm.load(6)['paths'][-test_size:]
]
test_label_data = np.array([(0., 1.) if item > 3 else (1., 0.)
                            for item in rm.load(6)['robustness'][-test_size:]
                            ])

Example #7
    plt.subplot(514)
    plt.plot(y[:, 3])
    plt.ylabel('epsilon')
    plt.subplot(515)
    plt.plot(y[:, 4])
    plt.ylabel('action')
    plt.pause(0.0001)


if __name__ == '__main__':
    images_dir = '/home/hsli/imagenet-data/train/'

    feature_extractor = load_model(
        'checkpoints/mobilenetv2_predictor_2W_acc_0.6955_epoch50.hdf5')

    rm = ResultManager('results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

    agent = Q_Agent(s_dim=2,
                    a_dim=10,
                    epsilon_decay=0.991,
                    epsilon_min=0.02,
                    lr=0.4,
                    gamma=0.92)

    step_count = 0

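    # Online environment that queries the Face++ cloud API, sampling 37 images
    # per ImageNet class.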
    env = EnvironmentAPI(imagenet_train_path=images_dir,
                         samples_per_class=37,
                         cloud_agent=FacePP())
Example #8
from keras.applications import MobileNetV2

from Agents import PG_Agent, DDPG_Agent
from ImageCompressionEnvironment import BatchImgEnvironment

np.random.seed(2)

if __name__ == '__main__':
    model = MobileNetV2()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    images_dir = '/home/hsli/imagenet-data/train/'

    rm = ResultManager('results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

    agent = DDPG_Agent(a_dim=64, s_dim=64, train_batchsize=128)

    epsilon = 0.8  # control exploration
    epsilon_decay = 0.99

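    # Batched environment: 1024 samples per class, stepped in batches of 128
    # through the MobileNetV2 classifier compiled above.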
    env = BatchImgEnvironment(imagenet_train_path=images_dir,
                              samples_per_class=1024,
                              step_batch_size=128,
                              deep_model=model)

    try:

Example #9
    def estimate(self):
        # Average accuracy and reward over the most recent `recent_zone` steps,
        # or over the whole history while fewer steps have been recorded.
        if len(self.agent_memory['reward']) < self.recent_zone:
            recent_reward = np.mean(self.agent_memory['reward'])
            recent_acc = np.mean(self.agent_memory['accuracy'])
        else:
            recent_reward = np.mean(self.agent_memory['reward'][-self.recent_zone:])
            recent_acc = np.mean(self.agent_memory['accuracy'][-self.recent_zone:])
        return recent_acc, recent_reward


#

if __name__ == '__main__':
    api = Baidu()
    # api = FacePP()
    rm = ResultManager('result')

    running_agent = RunningAgent(
                                 # dqn_path='evaluation_results/agent_DQN_train_baidu_DNIM.h5',
                                 # dqn_path='evaluation_results/agent_DQN_train_face_imagenet.h5',
                                 # dqn_path='evaluation_results/agent_DQN_train_face_DNIM.h5',
                                 # dqn_path='evaluation_results/agent_DQN_retrain_face_DNIM.h5',
                                 # dqn_path='evaluation_results/agent_DQN_retrain_baidu_DNIM.h5',
                                 dqn_path='result/agent_DQN_train_baidu_imagenet.h5',
                                 banchmark_q=75,
                                 cloud_backend=api,
                                 )

    # imagenet_paths = _gen_sample_set_imagenet('/home/hsli/gnode02/imagenet-data/train/', 2)
    # imagenet_paths = _gen_sample_set_imagenet('/home/imagenet-data/train/', 2)

    with open("result/imagenet_baidu_ref2000.pkl", "rb") as file:

Example #10
    plt.subplot(515)
    plt.plot(y[:, 4])
    plt.ylabel('action')
    plt.pause(0.0001)


if __name__ == '__main__':
    # images_dir = '/home/hsli/gnode02/imagenet-data/train/'
    images_dir = '/home/imagenet-data/train/'

    feature_extractor = MobileNetV2(include_top=False)
    x = feature_extractor.output
    x = AveragePooling2D(pool_size=(4, 4))(x)
    feature_extractor = Model(inputs=feature_extractor.input, outputs=x)

    rm = ResultManager('evaluation_results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

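    # DQN over the 1280-dimensional MobileNetV2 feature vector, choosing among
    # 10 discrete actions (presumably JPEG quality levels).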
    agent = DQN_Agent(s_dim=1280,
                      a_dim=10,
                      epsilon_decay=0.99,
                      epsilon_min=0.02,
                      gamma=0.95,
                      replay_batchsize=256)

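    # In evaluation mode, load the pretrained DQN and disable exploration entirely.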
    if EVALUATION:
        agent.model = load_model(
            'evaluation_results/agent_DQN_train_amazon_imagenet.h5'
        )  # If in evaluation phase, replace this with the actual pretrained model
        agent.curr_exploration_rate = 0

Example #11
from ImageCompressionEnvironment import BatchImgEnvironment

np.set_printoptions(precision=3)

tf.set_random_seed(2)
np.random.seed(2)

EVALUATION = True

if __name__ == '__main__':
    images_dir = '/home/hsli/gnode02/imagenet-data/train/'

    feature_extractor = load_model(
        'checkpoints/mobilenetv2_predictor_2W_acc_0.6955_epoch50.hdf5')

    rm = ResultManager('results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

    agent = Q_Agent(s_dim=2,
                    a_dim=10,
                    epsilon_decay=0.9993,
                    epsilon_min=0.2,
                    lr=0.1,
                    gamma=0.95)

    step_count = 0

    env = BatchImgEnvironment(imagenet_train_path=images_dir,
                              samples_per_class=2,
                              backbone_model=InceptionV3(),

Example #12
        for image_name in np.random.choice(os.listdir(
                "%s/%s" % (imagenet_data_path, img_class)),
                                           size=samples_per_class):
            sample_image_path = ("%s/%s/%s" %
                                 (imagenet_data_path, img_class, image_name))

            image_paths.append(sample_image_path)
    return image_paths


def size_Q(img, Q):
    # Return the size in bytes of `img` after in-memory JPEG encoding at quality Q.
    f = BytesIO()
    img.save(f, format="JPEG", quality=Q)
    return len(f.getvalue())
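# e.g. size_Q(Image.open(path).convert("RGB"), 75) gives the JPEG byte size at quality 75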


rm = ResultManager('results')
sample_set = gen_sample_set('/home/hsli/imagenet-data/train/', 1)
aver_size_list = []
for i in np.arange(1, 101, 1):
    print("Parsing %s" % i)
    aver_size = [
        size_Q(Image.open(path).convert("RGB"), int(i)) for path in sample_set
    ]
    aver_size_list.append(aver_size)

rm.save(aver_size_list,
        name='aver_size',
        topic='measurements',
        comment="finegrained average size upon different Q on 2W images")
Example #13
    sums = []
    ids = []
    curr_sum = 0
    for idx, item in enumerate(l):
        if item == 1:
            curr_sum += 1
            if curr_sum == len(l):
                return len(l) - 1
        else:
            sums.append(curr_sum)
            curr_sum = 0
            ids.append(idx - 1)
    return np.array(ids)[np.argmax(sums)]


rm = ResultManager('evaluation_results')

sample_paths, sample_labels = gen_sample_set(
    '/home/hsli/gnode02/imagenet-data/train/', 5)
model_labels = to_categorical(sample_labels, 1000)

model = ResNet50()
model.compile('adam', 'categorical_crossentropy',
              ['top_k_categorical_accuracy'])

robust_dict = defaultdict(list)
measurement_dict = defaultdict(list)
banchmark = defaultdict(list)
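# Loop over the sampled images, logging a timestamp every 100 images; the dicts
# above presumably accumulate per-image top-5 robustness measurements.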
for idx, path in enumerate(sample_paths):
    if idx % 100 == 0:
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), end='\t')

Example #14
               loc=8, ncol=3)

    plt.subplots_adjust(top=1.0,
                        bottom=0.33,
                        left=0.105,
                        right=0.91,
                        hspace=0.2,
                        wspace=0.2)
    plt.show()

x = np.linspace(0, 2 * np.pi, 50)
y = np.sin(x)
plt.plot(x, y)
plt.show()

rm = ResultManager('evaluation_results')
rm.print_meta_info()
not_reload = rm.load(16)
reload = rm.load(20)

initial_log = reload
# plot_keys = ['status', 'step_count', 'comp_size', 'upload_size']
# plot_keys = ['agent_epsilon', 'agent_accuracy', 'recent_reward', 'explor_rate']
plot_keys = ['agent_epsilon', 'recent_reward', 'explor_rate']
plot_durations(np.array([initial_log[key] for key in plot_keys]),
               title_list=plot_keys)
plot_keys = ['status', 'recent_accuracy', 'accuracy', 'upload_size']
plot_durations(np.array([initial_log[key] for key in plot_keys]),
               title_list=plot_keys)
# # evaluation_framework(not_reload)
evaluation_framework(rm.load(17))
Example #15
    plt.ylabel('epsilon')
    plt.subplot(515)
    plt.plot(y[:, 4])
    plt.ylabel('action')
    plt.pause(0.0001)


if __name__ == '__main__':
    images_dir = '/home/hsli/gnode02/imagenet-data/train/'

    feature_extractor = MobileNetV2(include_top=False)
    x = feature_extractor.output
    x = AveragePooling2D(pool_size=(4, 4))(x)
    feature_extractor = Model(inputs=feature_extractor.input, outputs=x)

    rm = ResultManager('evaluation_results')
    agent_acc_size_dict = []
    origin_acc_size_dict = []

    agent = DQN_Agent(s_dim=1280,
                      a_dim=10,
                      epsilon_decay=0.99,
                      epsilon_min=0.02,
                      gamma=0.95,
                      replay_batchsize=256)

    if EVALUATION:
        agent.model = load_model(
            'evaluation_results/agent_DQN_train_amazon_imagenet.h5'
        )  # If in evaluation phase, replace this with the actual pretrained model
        agent.curr_exploration_rate = 0

    step_count = 0