# Prepare model: load the pretrained network.
model = load_model(pretrained_model=os.path.join('./DealNet/checkpoint',
                                                 'VGG16', 'VGG16_300epoch.t7'),
                   use_gpu=True)
print("Model load successfully!")
local_dir = './static/image_database/'
# Extract database features.
# If the database images have not changed, the previously saved feature vectors can be reused to save time.
if args.update:
    # Extract database features, one sub-folder at a time.
    gallery_feature = torch.FloatTensor()
    image_paths = []
    for dir_name in os.listdir(local_dir):
        # Prepare data set.
        data_loader = load_data(
            data_path=local_dir + dir_name + '/',
            batch_size=1,
            shuffle=False,
            transform='default',
        )
        increase_feature, increase_image_paths = extract_feature(
            model=model, dataloaders=data_loader)  # e.g. torch.Size([59, 2048])
        increase_feature = torch.from_numpy(increase_feature)
        increase_image_paths = increase_image_paths.tolist()
        gallery_feature = torch.cat((gallery_feature, increase_feature), 0)
        image_paths.extend(increase_image_paths)
    # Save once, after all sub-folders have been processed.
    np.save('./retrieval/models/gallery_feature.npy',
            gallery_feature.numpy())
    np.save('./retrieval/models/image_paths.npy', np.array(image_paths))
else:
    # Reuse the features and paths saved by a previous update run.
    gallery_feature = np.load('./retrieval/models/gallery_feature.npy')
    gallery_feature = torch.from_numpy(gallery_feature)
    image_paths = np.load('./retrieval/models/image_paths.npy')
    image_paths = image_paths.tolist()
def dynamic_modification_timer(mutex):
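    # Hourly maintenance pass: sync the update/delete staging folders with the
    # in-memory gallery and the encrypted image table, then re-arm the timer.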
    time.sleep(3600)
    mutex.acquire()
    flag = False
    global gallery_feature
    global image_paths
    '''
    Additions:
    read the new images, append their feature vectors to the gallery,
    and write the AES-encrypted images into the database.
    '''
    base_path = './static/image_database/'
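    # Newly added images end up here once their encrypted copies are stored in MySQL.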

    # Prepare model: load the pretrained network.
    # # Deletable block (start):
    # model = load_model(pretrained_model='./retrieval/models/net_best.pth', use_gpu=True)
    # print("Model load successfully!")
    # gallery_feature = np.load('./retrieval/models/gallery_feature.npy')  #ndarray
    # image_paths = np.load('./retrieval/models/image_paths.npy')
    # gallery_feature = torch.from_numpy(gallery_feature)
    # image_paths = image_paths.tolist()
    # # Deletable block (end).
    update_path = './static/update_images/'
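    # Staging folder: images dropped here are indexed, encrypted into the
    # database, and then moved into the gallery.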
    for path in os.listdir(update_path):
        data_loader = load_data(
            data_path=update_path + path + '/',
            batch_size=1,
            shuffle=False,
            transform='default',
        )
        increase_feature, increase_image_paths = extract_feature(
            model=model,
            dataloaders=data_loader)  # torch.Size([59, 2048]) # tensor list
        # Encrypt each new image and insert it into the database (AES).
        if len(increase_image_paths) != 0:
            flag = True
            print('New images added to the gallery.')
            conn = dbinfo()
            conncur = conn.cursor()
            try:
                for i in range(0, len(increase_image_paths)):
                    img_path = increase_image_paths[i]
                    extend_path = img_path[img_path.rfind('/') + 1:]
                    with open(img_path, 'rb') as fp:
                        AES_data = aes.encrypt(fp.read())
                    try:
                        # REPLACE overwrites an existing row with the same
                        # file_name (assuming file_name is the table's key).
                        sql_insertimage = "replace into image_AES (file_name, image_value) VALUES (%s, %s)"
                        conncur.execute(sql_insertimage,
                                        (extend_path, AES_data))
                        conn.commit()
                    except pymysql.Error as e:
                        print("Error %d %s" % (e.args[0], e.args[1]))
                        sys.exit(1)
                    # Move the image from the staging folder into the gallery.
                    new_path = base_path + extend_path
                    shutil.copyfile(img_path, new_path)
                    os.remove(img_path)
                    increase_image_paths[i] = new_path
            except IOError as e:
                print("Error %d %s" % (e.args[0], e.args[1]))
                sys.exit(1)
            finally:
                conn.close()

            gallery_feature = np.load('./retrieval/models/' + path +
                                      '_feature.npy')
            gallery_feature = torch.from_numpy(gallery_feature)
            image_paths = np.load('./retrieval/models/' + path + '_paths.npy')
            image_paths = image_paths.tolist()
            gallery_feature = torch.cat((gallery_feature, increase_feature), 0)
            image_paths.extend(increase_image_paths)
            # print(gallery_feature)
            # print(image_paths)
            # np.save('./retrieval/models/gallery_feature.npy', gallery_feature.numpy())
            # np.save('./retrieval/models/image_paths.npy', np.array(image_paths))
    '''
    Deletions:
    remove each image from the database, then use its file name to find its
    index in image_paths and delete the corresponding feature vector.
    '''

    # Prepare model: load the pretrained network.
    # # Deletable block (start):
    # model = load_model(pretrained_model='./retrieval/models/net_best.pth', use_gpu=True)
    # print("Model load successfully!")
    # gallery_feature = np.load('./retrieval/models/gallery_feature.npy')  #ndarray
    # image_paths = np.load('./retrieval/models/image_paths.npy')
    # gallery_feature = torch.from_numpy(gallery_feature)
    # image_paths = image_paths.tolist()
    # # Deletable block (end).
    delete_path = './static/delete_images/'
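    # Staging folder: images dropped here are removed from the gallery and the database.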
    for path in os.listdir(delete_path):
        data_loader = load_data(
            data_path=delete_path + path + '/',
            batch_size=1,
            shuffle=False,
            transform='default',
        )
        delete_feature, delete_image_paths = extract_feature(
            model=model,
            dataloaders=data_loader)  # torch.Size([59, 2048]) # tensor list
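        # Only the returned paths are used below; the extracted delete_feature is not needed.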
        if len(delete_image_paths) != 0:
            print('Images removed from the gallery.')
            conn = dbinfo()
            conncur = conn.cursor()
            del_sql = 'delete from image_AES where file_name = %s'
            flag = True
            for i in range(0, len(delete_image_paths)):
                img_path = delete_image_paths[i]
                os.remove(img_path)
                # Keep only the bare file name for the database lookup.
                delete_image_paths[i] = img_path[img_path.rfind('/') + 1:]
            # Delete the corresponding rows from the database.
            try:
                conncur.executemany(del_sql, delete_image_paths)
                conn.commit()
            except pymysql.Error:
                conn.rollback()
            conn.close()
            del_list = []
            for i in range(0, len(image_paths)):
                img_path = image_paths[i]
                extend_path = img_path[img_path.rfind('/') + 1:]
                if extend_path in delete_image_paths:
                    del_list.append(i)
                    os.remove(img_path)
            gallery_feature = np.load('./retrieval/models/' + path +
                                      '_feature.npy')
            gallery_feature = torch.from_numpy(gallery_feature)
            image_paths = np.load('./retrieval/models/' + path + '_paths.npy')
            image_paths = image_paths.tolist()
            gallery_feature = gallery_feature.numpy()
            gallery_feature = np.delete(gallery_feature, del_list, axis=0)
            image_paths = np.delete(image_paths, del_list)
            gallery_feature = torch.from_numpy(gallery_feature)
            image_paths = image_paths.tolist()

    # Persist the updated gallery only when something actually changed.
    if flag:
        np.save('./retrieval/models/gallery_feature.npy',
                gallery_feature.numpy())
        np.save('./retrieval/models/image_paths.npy',
                np.array(image_paths))
    else:
        print('Gallery unchanged; nothing to save.')
    mutex.release()
    global thread
    # Re-arm the timer so the next sync runs in an hour.
    thread = threading.Thread(target=dynamic_modification_timer,
                              args=(mutex, ))
    thread.start()
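# Note: the timer re-arms itself above but still has to be started once; the
# `mutex` and `thread` globals are assumed to be created elsewhere, roughly:
#   mutex = threading.Lock()
#   thread = threading.Thread(target=dynamic_modification_timer, args=(mutex,))
#   thread.start()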
args = parser.parse_args()

if args.update:
    # Create thumbnail images.
    create_thumb_images(
        full_folder='./static/image_database/',
        thumb_folder='./static/thumb_images/',
        suffix='',
        height=200,
        del_former_thumb=True,
    )

# Prepare data set.
data_loader = load_data(
    data_path='./static/image_database/',
    batch_size=1,
    shuffle=False,
    transform='default',
)

# Prepare model: load the pretrained network.
model = load_model(pretrained_model=os.path.join('./DealNet/checkpoint',
                                                 'VGG16', 'VGG16_300epoch.t7'),
                   use_gpu=True)
print("Model load successfully!")

# Extract database features.
# If the database images have not changed, the previously saved feature vectors can be reused to save time.
if args.update:
    # Extract database features.
    gallery_feature, image_paths = extract_feature(
        model=model, dataloaders=data_loader)  # torch.Size([59, 2048])