siamese_network, cnn = siamese_net(input_shape, l2_penalization, learning_rate) # cnnh5 = r'D:\SpyderWorkSpace\GUI_Ganerator\models\trained_cnn.h5' cnn.load_weights(r'D:\SpyderWorkSpace\GUI_Ganerator\models\trained_cnn.h5') c_size = 200 layer_output = K.function([cnn.layers[0].input], [cnn.get_layer('Dense1').output]) cd_img = r'D:\zhu\chen1\data\pick8\p_app_resize_Td_sts_c_resize512_noui1' txt_dir = r'D:\zhu\chen1\data\pick8\aTrees_dict_app' file_csv = r'F:\2017\zhu\RicoDataset\app_details.csv' st_dir = r'D:\zhu\chen1\data\pick8\p_app_resize_Td_sts' path_file_name = r'D:\SpyderWorkSpace\GUI_Ganerator\data\categories_app_emb' appsl, appsd = get_s_app(file_csv, st_dir) # 按照category中app的数量 for (cat, cat_apps) in appsd.items(): print('\ncategory: ', cat) print('\ncategory_apps: ', cat_apps) train_uis = [] for app in os.listdir(cd_img): if app in cat_apps: app_dir = os.path.join(cd_img, app) for ui in os.listdir(app_dir): ui_dir = os.path.join(app_dir, ui) train_uis.append(ui_dir) # 需要embedding的subtrees # 输入数据,生成embedding c_num = len(train_uis) / c_size # 迭代次数 x_train_embedding = []
# Script entry point: seed the RNG, configure relative data/model paths, rank
# categories by how many apps they contain, and select the category/categories
# indexed by _ns for training.
# NOTE(review): this source arrived collapsed onto a single line; indentation
# is reconstructed and the final 'for _n in _ns' loop body continues beyond
# this chunk — confirm against the original file.
if __name__ == '__main__':
    random.seed(SEED)  # SEED is defined elsewhere in this module
    file_csv = r'app_details.csv'  # app metadata CSV
    cd_img = r'.\p_app_Td_sts_resized'  # resized UI images
    txt_img = r'.\aTrees_dict_app'
    st_dir = r'.\p_app_Td_sts'
    db_dir = r'.\st_bank_app'
    emb_file = r'.\data\categories_app_emb'
    m_save_path = r'.\models'  # where trained models are saved
    # 3 loss
    # NOTE(review): '\s' is not a valid escape sequence (SyntaxWarning on
    # Python 3.12+); this should likely be a raw string r'.\samples' — confirm.
    NEGATIVE_FILE = '.\samples'
    # appsl: app list, appsd: mapping category -> apps (presumably; get_s_app
    # is defined elsewhere — verify its return contract).
    appsl, appsd = get_s_app(file_csv, st_dir)
    # Build [category, app_count] pairs and sort descending by count, so that
    # appsl1[0] is the category with the most apps.
    appsl1 = []
    for (k,v) in appsd.items():
        appsl1.append([k,len(v)])
    appsl1 = sorted(appsl1, key=lambda x: x[1], reverse=True)
    # Indices (into the ranked list) of the categories to train on.
    _ns = []
    _n = 0; _ns.append(_n)  # News & Magazines
    mul_loss = True  # enable the multi-loss training variant (used downstream)
    c_apps = []  # selected apps, accumulated in the loop below
    c_cats = []  # selected category names
    for _n in _ns:
        c_cat = appsl1[_n][0]  # category name at rank _n
        c_cats.append(c_cat)