def eval(retrieval_top_n=2):
    """Evaluate top-n retrieval accuracy on the in-shop test split.

    For every test item one of its images is drawn at random as the query;
    the top-n retrieved labels are then checked against the item's remaining
    images.

    NOTE(review): the function name shadows the builtin ``eval`` — kept
    unchanged for backward compatibility with existing callers.

    Returns:
        tuple: (include_times, should_include_times, include_once, include_zero)
    """
    dataset = Fashion_inshop()
    n_test = dataset.test_len
    deep_feats, color_feats, labels = load_feat_db()
    # Keep only the rows belonging to the test split (the last n_test rows).
    deep_feats = deep_feats[-n_test:]
    color_feats = color_feats[-n_test:]
    labels = labels[-n_test:]
    # Label -> (deep feature, color feature) lookup for query construction.
    feat_dict = {
        label: (deep, color)
        for label, deep, color in zip(labels, deep_feats, color_feats)
    }
    include_once = 0           # queries with >= 2 correct results in top-n
    include_zero = 0           # queries with <= 1 correct result in top-n
    include_times = 0          # correct results beyond the query image itself
    should_include_times = 0   # best achievable count of such results
    for iter_id, item_id in enumerate(dataset.test_list):
        item_imgs = dataset.test_dict[item_id]
        query_img = random.choice(item_imgs)
        result = get_deep_color_top_n(
            feat_dict[query_img], deep_feats, color_feats, labels,
            retrieval_top_n)
        retrieved = [entry[0] for entry in result]
        hits = sum(1 for key in retrieved if key in item_imgs)
        should_include_times += len(item_imgs) - 1
        if hits >= 2:
            include_once += 1
        else:
            include_zero += 1
        # The query image itself is always retrievable, hence the -1.
        include_times += hits - 1
        if iter_id % 10 == 0:
            print("{}/{}, is included: {}/{}, included times: {}/{}".format(
                iter_id, len(dataset.test_list), include_once,
                include_once + include_zero, include_times,
                should_include_times))
    return include_times, should_include_times, include_once, include_zero
from file import Anno, Eval
import random
import re

#----- CONFIG -----#
# Serve bundled assets under /static.
app = flask.Flask(__name__, static_url_path='/static')
app.config['DEBUG'] = True
# Base URL of the S3 bucket hosting the dataset images.
static_path = "https://capstone-deepfashion.s3.us-east-2.amazonaws.com/"

#-------- FEATURES DATA -----------#
deep_feats, color_feats, labels = load_feat_db()

#----- ROUTES -----#
@app.route("/")
def random_query_sample(batch=1):
    '''Get an image from the test dataset, upper wear only'''
    ann = Anno(is_train=False)
    # Draw `batch` random annotation rows; only the first image is shown.
    paths = [
        ann.loc[random.randrange(0, len(ann))]['image_name']
        for _ in range(batch)
    ]
    selected_path = static_path + paths[0]
    return flask.render_template('query_styling.html',
                                 selected_path=selected_path)
# -*- coding:utf-8 -*-
from sklearn.cluster import KMeans
from retrieval import load_feat_db
from sklearn.externals import joblib
from config import DATASET_BASE, N_CLUSTERS
import os

if __name__ == '__main__':
    # Cluster only the deep features; color features are not used here.
    deep_feats, _, labels = load_feat_db()
    # Single worker, fixed seed — reproducible but not parallel.
    kmeans = KMeans(n_clusters=N_CLUSTERS, random_state=1, n_jobs=1)
    kmeans.fit(deep_feats)
    save_path = os.path.join(DATASET_BASE, r'models', r'kmeans.m')
    joblib.dump(kmeans, save_path)
from sklearn.cluster import KMeans
from retrieval import load_feat_db
from sklearn.externals import joblib
from config import DATASET_BASE, N_CLUSTERS
import os

if __name__ == '__main__':
    # Color features are loaded but not clustered.
    feature_matrix, _color, labels = load_feat_db()
    # k-means++ seeding, deterministic seed, all CPU cores (n_jobs=-1).
    clusterer = KMeans(n_clusters=N_CLUSTERS, init='k-means++',
                       random_state=0, n_jobs=-1)
    clusterer.fit(feature_matrix)
    out_path = os.path.join(DATASET_BASE, r'models', r'kmeans.m')
    joblib.dump(clusterer, out_path)
# Fit a k-means model on deep features from either the scrapped dataset
# (--scrapped) or the default DeepFashion dataset, and persist it with joblib.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--scrapped",
        help="run kmeans on scrapped dataset rather than on deepfashion",
        action="store_true")
    args = parser.parse_args()

    # The two paths differ only in how the features are loaded and in the
    # filename the fitted model is saved under; everything else is shared,
    # so the fit/dump logic is written once instead of duplicated per branch.
    if args.scrapped:
        print("Performing kmeans clustering on scrapped dataset.")
        feats, color_feats, labels = load_feat_db(custom=True)
        model_file = r'kmeans_scrapped.m'
    else:
        print("Performing kmeans clustering on deepfashion dataset.")
        feats, color_feats, labels = load_feat_db()
        model_file = r'kmeans.m'

    model = KMeans(n_clusters=N_CLUSTERS, random_state=0, n_jobs=-1).fit(feats)
    model_path = os.path.join(DATASET_BASE, r'models', model_file)
    joblib.dump(model, model_path)
# -*- coding:utf-8 -*-
from sklearn.cluster import KMeans
from retrieval import load_feat_db
from sklearn.externals import joblib
from config import DATASET_BASE, N_CLUSTERS
import os

if __name__ == '__main__':
    # load_feat_db() returns a 3-tuple (deep_feats, color_feats, labels) —
    # every sibling script unpacks three values — so the previous two-target
    # unpacking raised ValueError at runtime. Color features are unused here.
    feats, _, labels = load_feat_db()
    model = KMeans(n_clusters=N_CLUSTERS, random_state=0, n_jobs=-1).fit(feats)
    model_path = os.path.join(DATASET_BASE, r'models', r'kmeans.m')
    joblib.dump(model, model_path)