import cv2

import train


def create_features(img):
    # Inference-only wrapper: the grayscale copy feeds the texture-feature
    # code, and train=False skips label handling.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    features, _ = train.create_features(img, img_gray, label=None, train=False)
    return features
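# Minimal usage sketch for the wrapper above; 'sample.png' is an illustrative
# path, not a file from the original project.
import cv2

sample_img = cv2.imread('sample.png', cv2.IMREAD_COLOR)
if sample_img is not None:  # cv2.imread returns None for a missing file
    feats = create_features(sample_img)
    print(feats.shape)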
import os

import imageio


def gen_predictions(file, h_lick_p, args, model):
    f_b_name = os.path.basename(file)
    dst_f_path = os.path.join(output_dir, 'mask_pred_' + f_b_name)
    dst_f_path_bin = os.path.join(output_dir, 'mask_pred_binarized_' + f_b_name)
    # Get the image and its ground-truth mask.
    tmp_img, tmp_labl = get_mask_n_img_f(file)
    # If a cached prediction already exists, read it back and return it;
    # otherwise generate the texture features and prediction from scratch.
    # Note: the '_not_a_file_result' suffix means this check never matches,
    # so the cache path is effectively disabled.
    if os.path.isfile(dst_f_path + '_not_a_file_result'):
        return imageio.imread(dst_f_path).flatten(), tmp_labl.flatten()
    else:
        # Generate features for the model based on the initial analysis.
        features, labls = create_features(os.path.splitext(f_b_name)[0],
                                          tmp_img, tmp_labl, h_lick_p, args,
                                          train=False)
        # Min-max scaling with the same parameters as the training script.
        features, _ = min_max_scaling(features)
        predictions = compute_prediction(features, model)
        write_img_to_file(predictions, tmp_img, dst_f_path)
        # Write the binarized prediction to file for analysis.
        gen_img_visual(tmp_img, predictions.reshape((256, 256)), tmp_labl,
                       dst_f_path_bin)
        return predictions.flatten(), labls
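# gen_predictions relies on min_max_scaling reusing the scaler fitted at
# training time. A minimal sketch of that idea with scikit-learn, assuming the
# training script pickled its fitted MinMaxScaler; the path 'scaler.pkl' is
# hypothetical, not from the original code.
import pickle


def min_max_scaling(features, scaler_path='scaler.pkl'):
    with open(scaler_path, 'rb') as f:
        scaler = pickle.load(f)
    # transform (not fit_transform): reuse the training-time min/max so train
    # and test features live on the same scale.
    return scaler.transform(features), scaler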
from collections import defaultdict


def predict_all(model_file, input_file, n):
    # Load the trained weights, then label each input sentence in turn.
    w = load_model(model_file)
    result = []
    for sentence in input_file:
        phi = defaultdict(lambda: 0)
        phi = create_features(sentence.lower(), n, phi)
        result.append(predict_one(w, phi))
    return result
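# The n-gram feature builder assumed by predict_all is not shown here; a
# minimal sketch under that assumption (space-tokenized n-grams of order n
# counted into the phi mapping passed in):
def create_features(sentence, n, phi):
    words = sentence.split()
    for i in range(len(words) - n + 1):
        phi[' '.join(words[i:i + n])] += 1
    return phi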
import os

import cv2

import train


def compute_prediction(fn, model):
    # Pixels within `border` of the edge have no full neighbourhood, so the
    # prediction image is smaller than the input by 2*border per axis.
    border = int((h_neigh - 1) / 2)
    img = cv2.imread(fn, cv2.IMREAD_COLOR)
    # Cache the expensive feature computation per input file.
    cached_fn = os.path.join(
        cache_dir, fn.split(os.path.sep)[-1].split('.')[0] + '.pkl')
    if os.path.isfile(cached_fn):
        features = load_pickle(cached_fn)
    else:
        features = train.create_features(img)
        dump2pickle(features, cached_fn)
    # Apply the scaler fitted at training time, then classify per pixel.
    scaler = load_pickle(os.path.join(model_dir, scaler_fn))
    features = features.reshape(-1, features.shape[1])
    features = scaler.transform(features)
    model_predictions = model.predict_proba(features)
    model_predictions = prob2class(model_predictions)
    predictions_image = model_predictions.reshape(
        [img.shape[0] - 2 * border, img.shape[1] - 2 * border, -1])
    return predictions_image
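# prob2class is not shown here; a plausible sketch, assuming predict_proba
# returns an (n_samples, n_classes) array whose columns are the class labels:
import numpy as np


def prob2class(probabilities):
    # Pick the most likely class per sample.
    return np.argmax(probabilities, axis=1)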
import sys
from collections import defaultdict

from train import create_features, predict_one

# Load the weight file (feature<TAB>weight per line), then label each test line.
with open(sys.argv[1], 'r') as modelfile, open(sys.argv[2], 'r') as testfile:
    w = defaultdict(lambda: 0)
    for line in modelfile:
        spl = line.strip().split('\t')
        w[spl[0]] = int(spl[1])
    for x in testfile:
        phi = create_features(x)
        y2 = predict_one(w, phi)
        print(y2)
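# The train-module helpers used above are not shown; a minimal sketch of the
# unigram-feature / perceptron pair these scripts appear to assume:
from collections import defaultdict


def create_features(x):
    # Unigram counts: one 'UNI:word' feature per token occurrence.
    phi = defaultdict(lambda: 0)
    for word in x.split():
        phi['UNI:' + word] += 1
    return phi


def predict_one(w, phi):
    # The sign of the weighted feature sum decides the binary label.
    score = sum(w[name] * value for name, value in phi.items())
    return 1 if score >= 0 else -1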
# test.py
# coding=utf-8
import sys
from collections import defaultdict

import train

# Weights: one "feature value" pair per line in the model file "ans".
Weight = defaultdict(lambda: 0)
with open("ans", "r") as modelFile:
    for line in modelFile:
        key, value = line.split()
        Weight[key] = float(value)

# Classify each input line and print "label<TAB>text".
with open(sys.argv[1], "r") as inputFile:
    for line in inputFile:
        X = line.strip()
        phi = train.create_features(X)
        Y = train.predict_one(Weight, phi)
        print("%d\t%s" % (Y, X))
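# Usage sketch for the script above ("ans" must hold the trained weights; the
# input file name is illustrative only):
#     python test.py test_input.txt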