def im2feature(im_name, params):
    """Load an image, extract per-pixel color features, and vector-quantize
    them with K-means.

    Parameters
    ----------
    im_name : str
        Path of the image to load.
    params : dict
        Must contain "feature" (one of "RGBrg", "LogOp", "RGB", "rg",
        "BothRGBLOG") and "n_cluster" (number of K-means clusters).

    Returns
    -------
    labels : np.ndarray of uint8
        Per-pixel cluster index, reshaped to the image's first two axes.
    kmeans : sklearn.cluster.KMeans
        The fitted clustering model.
    fvec : np.ndarray
        Feature matrix with every row replaced by its cluster center.

    Raises
    ------
    ValueError
        If params["feature"] is not a recognized feature name.
    """
    im_train = imread(im_name)
    # NOTE(review): imread returns (rows, cols, channels); the names (w, h)
    # are swapped relative to convention but are used consistently below.
    (w, h, d) = im_train.shape
    print_(verbosity, "\tExtracting feature vectors... ")
    feature = params["feature"]
    if feature == "RGBrg":
        fvec_ = get_norm_rg(im_train)
    elif feature == "LogOp":
        fvec_ = get_log_opponent(im_train)
    elif feature == "RGB":
        # First 3 columns of the combined RGB+rg feature -- presumably the
        # normalized RGB part; verify against get_norm_rg.
        fvec_ = get_norm_rg(im_train)[:, :3]
    elif feature == "rg":
        # Remaining columns -- presumably the chromaticity (rg) part.
        fvec_ = get_norm_rg(im_train)[:, 3:]
    elif feature == "BothRGBLOG":
        fvec_ = np.hstack([get_norm_rg(im_train),
                           get_log_opponent(im_train)])
    else:
        # Fail loudly here rather than hitting an UnboundLocalError on
        # fvec_ at the KMeans fit below.
        raise ValueError("Unknown feature type: %r" % feature)
    print_(verbosity, "\tClassifying features ...")
    kmeans = KMeans(n_clusters=params["n_cluster"], tol=.001, n_jobs=4,
                    max_iter=300, n_init=52, verbose=0).fit(fvec_)
    labels = kmeans.predict(fvec_)
    # Replace each feature vector by the center of its assigned cluster
    # (vectorized equivalent of the per-row copy loop).
    fvec = kmeans.cluster_centers_[labels, :]
    labels = labels.reshape((w, h)).astype(np.uint8)
    return labels, kmeans, fvec
def main(params, train):
    """Train a skin classifier and pickle it, or evaluate all saved models.

    Parameters
    ----------
    params : dict
        Configuration: "classifier" ("NB" or "RF"), "name" (pickle path for
        the trained model), plus the feature/cluster settings consumed by
        im2feature.
    train : bool
        True to train on "face_training/face*.png" and save the model;
        False to run every saved model (files matching "._*") against
        "face_testing/face*.png" and display the results.
    """
    si = ScreenImage()
    if train:
        # Initialization
        trainset = glob(join("face_training", "face*.png"))
        t0 = time()
        print_(verbosity, "Begin collecting training Samples")
        Labels, Samples = get_training_samples(trainset, params)
        print_(verbosity, "Success. Elapsed: %.2f s." % (time() - t0))
        print_(verbosity, "Begin classifier training using %s..."
               % (params["classifier"]))
        if params["classifier"] == "NB":
            clf = GaussianNB()
        elif params["classifier"] == "RF":
            clf = RandomForestClassifier()
        clf.fit(Samples, Labels)
        # Pickle requires a binary file handle ("wb"); text mode raises
        # TypeError under Python 3.  `with` also guarantees the handle is
        # closed (the original leaked it).
        with open(params["name"], "wb") as fh:
            pickle.dump([clf, params], fh)
    else:
        testset = glob(join("face_testing", "face*.png"))
        print_(verbosity, "Begin classifier prediction...")
        # NOTE(review): this array is overwritten by a scalar inside the
        # model loop below, so only the last model's score survives --
        # kept as-is to preserve the reported numbers; consider score[i].
        score = np.zeros(len(testset),)
        models = glob("._*")
        for i, testname in enumerate(testset):
            im_orig = imread(testname)
            truthname = get_groundname(testname)
            im_skin = [[] for k in models]
            title = ["" for k in models]
            for j, model in enumerate(models):
                im_truth = rgb2gray(imread(truthname)).astype(np.uint8)*255
                # SECURITY: unpickling executes arbitrary code -- only load
                # model files this program itself produced.
                with open(model, "rb") as fh:  # "rb": pickle is binary data
                    pkl = pickle.load(fh)
                clf = pkl[0]
                params = pkl[1]
                _, _, fvec = im2feature(testname, params)
                im_skin[j] = clf.predict(fvec).reshape(
                    im_truth.shape).astype(np.uint8)
                score = jaccard_similarity_score(im_truth, im_skin[j],
                                                 normalize=True)
                title[j] = "%s\nClassifier: %s, Thresh: %.2f\nK: %d, Score: %.2f" \
                    % (params["classifier"], params["feature"],
                       params["thresh"], params["n_cluster"], score)
            print_(verbosity, "\tTest %d of %d, Score %.2f\n"
                   % (i+1, len(testset), score))
            si.show(testname,
                    [im_orig, im_skin[0], im_skin[1], im_skin[2],
                     im_skin[3], im_skin[4]],
                    ["Original\n%s" % testname, title[0], title[1],
                     title[2], title[3], title[4]])
def get_training_samples(trainset, params):
    """Collect (cluster-center, class) training pairs from a set of images.

    Parameters
    ----------
    trainset : list of str
        Paths of training images; ground-truth masks come from
        get_groundname().
    params : dict
        Feature/cluster configuration forwarded to im2feature, plus
        "thresh" for get_truth_overlap.

    Returns
    -------
    (Labels, Samples) : tuple of np.ndarray
        Class label per sample and the matching feature-vector rows.
    """
    # NOTE(review): len(params["feature"]) doubles as the feature
    # dimensionality -- the feature-name strings appear chosen so their
    # length equals the number of columns im2feature produces; confirm
    # this holds for every feature type.
    Samples = np.zeros((200, len(params["feature"])))
    # -1 marks rows never filled in.  The original sentinel was 1
    # (np.ones + `Labels != 1`), which also silently discarded every
    # sample whose true class label was 1.
    Labels = np.full(200, -1.0)
    k = 0
    for i, trainname in enumerate(trainset):
        print_(verbosity,
               "\tBeginning training and truth image set %d of %d... "
               % (i+1, len(trainset)))
        truthname = get_groundname(trainname)
        im_truth = imread(truthname)[:, :, 0].astype(np.uint8)
        rgb_lab, kmeans, fvec = im2feature(trainname, params)
        # Zero out cluster labels outside the ground-truth mask.
        mask = rgb_lab * im_truth
        overlap = get_truth_overlap(kmeans, rgb_lab, mask,
                                    thresh=params["thresh"])
        print_(verbosity, "\tCache Samples/Labels ...\n")
        for lap in overlap:
            Samples[k, :] = lap["Center"]
            Labels[k] = lap["Class"]
            k += 1
    # Remove rows that were never filled (still at the -1 sentinel).
    keepers = Labels != -1
    return Labels[keepers], Samples[keepers, :]