def predict(new_path):
    frozen_inference_graph_path = './frozen_inference_graph_pb/frozen_inference_graph.pb'
    images_dir = new_path
    count = 0  # running sum of predicted labels
    model = predictor.Predictor(frozen_inference_graph_path)
    
    image_files = glob.glob(os.path.join(images_dir, '*.*'))

    val_results = []
    predicted_count = 0
    num_samples = len(image_files)
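    # run inference on every image and accumulate the predicted labels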
    for image_path in image_files:
        predicted_count += 1
        if predicted_count % 100 == 0:
            print('Predict {}/{}.'.format(predicted_count, num_samples))

        image_name = os.path.basename(image_path)
        try:
            # PIL raises instead of returning None when a file is missing or unreadable
            image = np.asarray(Image.open(image_path))
        except OSError:
            print('image %s could not be opened.' % image_name)
            continue
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        pred_label = int(model.predict([image])[0])
        count += pred_label
        print('Image Name: %s' % image_name)
        print('Pred Label: %d' %pred_label)
    return count
Example #2
def predict(sampledir, outputpath):
    featurelist = './sample/features.jsonl' # parsed data feature input file
    features = utility.readonelineFromjson(featurelist)
    feature_parser = utility.FeatureType()
    featureobjs = feature_parser.parsing(features)

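    # paths to the pre-trained models; only the LightGBM model is loaded below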
    lgbmodelpath = './sample/aimodel/GradientBoosted_model.txt'
    xgbmodelpath = './sample/aimodel/X_GradientBoosted_model.txt'
    rfmodelpath = './sample/aimodel/RandomForest_model.txt'
    
    start = time.strftime('%m-%d, %H:%M:%S', time.localtime(time.time()))
    
    # test on all samples
    
    predict = predictor.Predictor(sampledir, featureobjs, outputpath)
    predict.lgbmodel_load(lgbmodelpath)
    #predict.xgbmodel_load(xgbmodelpath)
    #predict.rfmodel_load(rfmodelpath)
    
    # run the prediction process
    predict.run()
    
    end = time.strftime('%m-%d, %H:%M:%S', time.localtime(time.time()))
    
    # email alarm routine
    subject = "capstone2 debug info"
    message = "predict csv save done\n"
    message += "runtime check\n"
    message += "start time : "+start + "\n"
    message += "end time : "+end + "\n"
    email_util.debugmail(subject, message)
Example #3
def runner(mode):

    hash_table = data_structure.Hash()
    predict = predictor.Predictor()

    # update_csv_lst()

    # 0: test
    # 1: full
    if mode == 0:
        csv_lst = ['../Support Files/CSV Files/companylist-2.csv']
    elif mode == 1:
        csv_lst = [
            '../Support Files/CSV Files/companylist.csv',
            '../Support Files/CSV Files/companylist-2.csv',
            '../Support Files/CSV Files/companylist-3.csv'
        ]
    else:
        raise ValueError('mode must be 0 (test) or 1 (full)')

    insert_from_csv(csv_lst, hash_table, 7.50)

    print('Total tickers to search:', hash_table.num_items, '\n')

    print('Prefetching webpages:')
    hash_table.prefetch_webpages()

    print('\n' + 'Screening stocks:')
    predict.init_run(hash_table)

    print('\n' + 'Checking watchlist:')
    predict.check_watchlist(hash_table)
Example #4
def construct_predictor():
    global predict
    fetch_output()
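    # assumption: 'output' and 'data' are module-level globals; fetch_output() is expected to set 'output'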
    if output is not None:
        predict = predictor.Predictor(data['ieee']['Abstract'],
                                      output['action'])
    else:
        predict = None
Example #5
	def Search(self):
		n = 15
		thresh = 70

		predict = pd.Predictor()
		predict.add_data(self.tpos)
		predict.plot()

		# accumulate the absolute prediction error over the last n-1 points
		res = 0
		for j in range(1, n):
			res += abs(predict.yAxis[-j] - self.data[-j][1])
		print("the result is:", res, " with:", len(predict.yAxis), " data points")
		if res < thresh:
			self.yAxis = predict.yAxis
			self.n_data = len(self.tpos)
			print("THE NUMBER OF DATA POINTS IS:", self.n_data)
Example #6
    def View_Result(self, ptr):
        os.system('clear')
        n = 15
        if len(ptr[0]) > (n - 1):
            predict = pd.Predictor()

            res = 0
            predict.add_data(ptr[1])
            predict.plot()
            print(ptr[0])
            print("\n")
            for j in range(1, n):
                t = ptr[0]
                res += abs(predict.yAxis[-j] - t[-j][1])
            print(predict.yAxis)
            print(res)
Example #7
    def Search(self):
        n = 15
        thresh = 85
        predict = pd.Predictor()
        for i in range(len(self.data)):
            if len(self.tpos[:i]) > (n - 1):
                res = 0
                predict.add_data(self.tpos[:i])
                predict.plot()
                for j in range(1, n):
                    t = self.data[:i]
                    res += abs(predict.yAxis[-j] - t[-j][1])
                if res < thresh:
                    self.n_data = len(self.tpos[:i])
                    print("THE NUMBER OF DATA POINTS IS:", self.n_data)
                    break
Example #8
def predicatorTest(random=False, dividor=10):
    films = getAllFilms()
    toLearn = []
    toPredict = []
    for film in films:
        if film.id % dividor == 0:
            toPredict.append(film)
        else:
            toLearn.append(film)

    predicatorObj = predictor.Predictor(toLearn, random)
    predictionResult = predictor.PredictionResult(predicatorObj, toPredict)
    print("\nLearning on " + str(len(toLearn)) + " films")
    print("Tested on " + str(len(toPredict)) + " films")
    print("Average difference: " + str(predictionResult.avgDiff))
    return predictionResult, predicatorObj, toPredict
Example #9
def predictor_runner(model_name, pipe):
    p = predictor.Predictor(model_name=model_name)
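    # signal the parent process that the model has finished loading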
    pipe.send(True)
    while True:
        req_pipe, seed, t = pipe.recv()
        logging.info("received generation request from " +
                     model_name + " predictor")
        try:
            res = None
            if t == SMALL_TYPE:
                res = p.simple(text=seed)
            elif t == LARGE_TYPE:
                res = p.large(text=seed)
            else:
                raise Exception("unknown prediction type")
            req_pipe.send((res, 1))
        except Exception as e:
            req_pipe.send((e, -1))
Example #10
def m_function(software):

    G = graph_generation.create_predicate_graph(software)
    #vis.visualize(G)

    pars = parameters.parameters(G)

    import predictor as pred
    predictor = pred.Predictor(True,
                               svr_params=['rbf', 1000, 1, 0.01],
                               test_predictor_acc=False)

    rectangles, sorted_results, trisection_counter, predictor = find_best_settings_direct(
        G=G,
        filter_dim=5,
        epsilon=10**(-4),
        trisection_lim=70,
        train_iterations=3,
        predictor=predictor)
    return rectangles, sorted_results, trisection_counter, predictor

    #res = main_ops.cross_validate_svr()

    #pars = parameters.parameters(G, False, False, [], [], True, [[0.16, 0.83, 0.83, 0.83, 0.16]], [32 for i in range(1)], 1, 2200)
    #res, conf, pred = training.train(G, pars, predictor)
    """
    pars = [parameters.parameters(G, False, False, [], [], True, [[0.5, 0.7, 0.3, 0.5, 0.5, 0.1, 0.5, 0.1]], [100], 1, math.inf) for x in range(5)]
    results = []
    x_plot = []
    configs = []
    for par in pars:
        res, conf, pred = training.train(G, par, predictor)
        results.append(res[2])
        x_plot.append(par)
    
    fig1 = plt.figure(1, figsize=(10,10))
    
    plot7 =  plt.subplot(15 if predictor.predictor_on else 3, 1, 1)
    y_plot = results
    x_plot = [i for i in range(len(y_plot))]
    plot7.set_xticks(np.arange(len(x_plot)))            
    plot7.plot(x_plot, y_plot)
    """
    """
Example #11
    def _create_predictor(hw_id: str,
                          percentile: float) -> predictor.Predictor:
        _predictor = predictor.Predictor(nodetype=hw_id, percentile=percentile)
        _predictor.assign_headers("headers.json")
        _predictor.assign_groundtruth("groundtruth.json")
        _predictor.assign_user_boundary("user_boundary.json")

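        # plug in the clustering pipeline components used by this predictor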
        from clustering_alg import MeanShift
        from clustering_score import VMeasure
        from distance import AveragePairCorrelation
        from normalizer import MinMax
        from optimizer import SimAnnealing
        _predictor.configure(clustering_alg=MeanShift(),
                             clustering_score=VMeasure(),
                             distance=AveragePairCorrelation(),
                             normalizer=MinMax(),
                             optimizer=SimAnnealing(),
                             boundary_percentage=140)
        return _predictor
Example #12
    def __init__(self, width, prediction, rob):
        self.pc = 0

        self.iq = []
        self.opq = []
        self.rf = [0] * 33  # (32 and an extra as a dummy for sw ROB entries)
        self.mem = []
        self.rob_size = rob
        self.rob = ROB(self.rob_size)
        self.lsq = LSQ()
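        # register alias table (RAT) used for register renaming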
        self.rat = [None] * 128
        self.rs = RS()
        self.eq = []
        self.wbq = []

        self.new_iq = []
        self.new_opq = []
        self.new_eq = []

        self.predictor = predictor.Predictor(prediction)
        self.super = width
        self.cycles = 0
        self.executed = 0
Example #13
    def __init__(self, capital):
        self.predictor = predictor.Predictor()
        self.capital = capital
        print(f'You have ${capital}. Every time the system successfully predicts your next press, you lose $1.')
        print('Otherwise, you earn $1. Type "enough" to leave the game. Let\'s go!')
Example #14
import predictor
import alpaca

predictor = predictor.Predictor()
buys = predictor.getPredictions()
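# split a fixed $400,000 budget evenly across the predicted buys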
maxPosition = 400000 / len(buys)
trader = alpaca.AlpacaTrader()

for stock in buys:
    price = trader.getPrice(stock)
    qty = maxPosition // price
    trader.makeTrade(stock, qty)
Example #15
        predictor_id = str(i) + str(j) + str(1)

        # create predictor directory
        current_path = os.getcwd()
        predictor_report_path = os.path.join(current_path, 'cv_reports', predictor_id)
        if not os.path.exists(predictor_report_path):
            os.makedirs(predictor_report_path)

        # write to the report file
        cv_report_file.write("\nPREDICTOR_ID: {}\n".format(predictor_id))
        data_interface.write_information(cv_report_file)

        model_predictor = predictor.Predictor(
            predictor_id=predictor_id,
            data_interface=data_interface,
            report_directory=predictor_report_path,
            development_set_flag=False,
            test_set_flag=True,
            early_stopping_set='test')

        # train model
        model_predictor.train(min_epoch_number=15)

        # obtain results
        dev_results = model_predictor.get_test_results()
        dev_metric = dev_results[0]
        dev_prediction = dev_results[1]
        dev_logits = dev_results[2]
        dev_truth_value = dev_results[3]

        # update predictions and results
Example #16
    def View(self):
        global data, data2, tdata, tdata2, band
        begin = 0
        num_data = 0
        cv2.namedWindow('Result', cv2.WINDOW_AUTOSIZE)
        cv2.createTrackbar('N_datos', 'Result', num_data, 60, self.update)
        cv2.createTrackbar('Begin', 'Result', begin, 60, self.update)

        self.img_temp = self.img.copy()
        predict = pd.Predictor()
        data = self.data[:]
        data2 = self.data[:]
        tdata = self.tpos[:]
        tdata2 = self.tpos[:]

        # TRACKING
        w = len(self.points)
        for i in range(0, w - 1):
            cv2.line(self.img, (self.points[i]), (self.points[i + 1]),
                     (255, 0, 0), 1)

        w = len(self.data)
        for i in range(0, w - 1):
            cv2.line(self.img, ((self.data[i][0], self.data[i][1])),
                     ((self.data[i + 1][0], self.data[i + 1][1])), (0, 0, 255),
                     1)
        cv2.setTrackbarPos("N_datos", "Result", self.n_data)

        while True:
            begin = cv2.getTrackbarPos('Begin', 'Result') + 90
            num_data = cv2.getTrackbarPos('N_datos', 'Result')

            cv2.line(self.img_temp, (begin, 0), (begin, 240), (200, 200, 200),
                     1)
            cv2.line(self.img_temp, (begin + num_data, 0),
                     (begin + num_data, 240), (20, 20, 20), 1)

            if not band and num_data > 2:
                predict.add_data(tdata2)
                predict.plot()
                band = True

            if len(predict.yAxis) > 0:
                posH = begin
                for i in range(len(predict.yAxis) - 1):
                    posH = posH + 1
                    try:
                        if posH > (begin + num_data):
                            cv2.line(self.img_temp, ((posH, predict.yAxis[i])),
                                     ((posH + 1, predict.yAxis[i + 1])),
                                     (255, 0, 0), 1)
                        else:
                            cv2.line(self.img_temp, ((posH, predict.yAxis[i])),
                                     ((posH + 1, predict.yAxis[i + 1])),
                                     (0, 255, 255), 1)
                    except:
                        pass

            cv2.imshow('Result', self.img_temp)
            k = cv2.waitKey(1)
            if k == 1048688:  # 'p' key (with modifier bits set by some OpenCV builds)
                cv2.imwrite("image2.png", self.img_temp)
            if k == 1048603:  # Esc key
                self.end_cam()
                break
Example #17
    def __init__(self):
        #model_dir = "/microservice/checkpoint"
        #checkpoint_path = get_current_model_path(model_dir)
        #checkpoint_path = '/tmp/cibn/b8f7a66c-b782-4743-bad8-2f7990cd6c31/model.ckpt-204'
        checkpoint_path = get_current_model_path('.')
        self.model = pd.Predictor(checkpoint_path)
Example #18
def predicted(dict_result):
    data = result_converter(dict_result)
    predictor = p.Predictor()
    result = predictor.nnPredict(data)
    return result
Example #19
import sys

if sys.argv[1].lower() == 'prepfortrain':
    import PrepForTrain
    PrepForTrain.Prep()
elif sys.argv[1].lower() == 'trainer':
    import trainer
    # improve here
elif sys.argv[1].lower() == 'prediction':
    import predictor
    predictor.Predictor().tell(sys.argv[2])
Example #20
        pad_top = (width - height) // 2
        expanded_image[pad_top:pad_top + height] = image
    return expanded_image


if __name__ == '__main__':
    images_dir = './train_test'
    trimaps_dir = './datasets/trimaps'
    output_dir = './train_test_output'
    frozen_inference_graph_path = ('./training/frozen_inference_graph_pb/' +
                                   'frozen_inference_graph.pb')

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    matting_predictor = predictor.Predictor(frozen_inference_graph_path,
                                            gpu_index='1')

    for image_path in glob.glob(os.path.join(images_dir, '*.*')):
        image = cv2.imread(image_path)
        image_name = image_path.split('/')[-1]
        trimap_path = os.path.join(trimaps_dir, image_name)
        trimap = cv2.imread(trimap_path, 0)
        trimap_b = data_provider.trimap(trimap)

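        # add a batch dimension before running the matting graph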
        images = np.expand_dims(image, axis=0)
        trimaps = np.expand_dims(trimap_b, axis=0)

        alpha_mattes, refined_alpha_mattes = matting_predictor.predict(
            images, trimaps)

        alpha_matte = np.squeeze(alpha_mattes, axis=0)


if __name__ == "__main__":    
    weight_path = FLAGS.weight_path
    positive_img_dir = FLAGS.positive_img_dir
    negative_img_dir = FLAGS.negative_img_dir
    output_dir = FLAGS.output_dir
    gpu_device = FLAGS.gpu_device
    threshold = 0.95
    

    print('loading model...')
    weight_type = weight_path.split('.')[-1]
    if weight_type == 'pb':
        model = predictor.Predictor(weight_path, gpu_index=gpu_device)
    else:
        # ckpt
        model = predictor_ckpt.Predictor(weight_path, gpu_index=gpu_device)
    
    
    # to analyze the model, save the misclassified samples
    # all test results are saved under 'output_dir'
    if os.path.exists(output_dir):  # make sure we don't clash with previous results
        raise RuntimeError('{} already exists, please check'.format(output_dir))
    else:
        os.mkdir(output_dir)
    
    # compute recall & precision
    print("\n\n-------------------evaluate recall & precision--------------------")
    # save False Negative sample in recall_mis_save_path
Example #22
import cv2
import imutils
import predictor

print("Running...")

cap = cv2.VideoCapture('../data/James.mp4')
# cap = cv2.VideoCapture(0)

p1 = predictor.Predictor()
frame_interval = predictor.runDiagnostic()

counter = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    rs = imutils.resize(frame, width=500)
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # resized_gray = imutils.resize(gray, width=500)
    # cv2.imshow('frame', resized_gray)
    counter += 1
    if counter % 15 == 0:  # feed every 15th frame to the predictor
        p1.next([frame])
    cv2.imshow("color", rs)
    #cv2.imshow('color', mouths)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #23
    'predictor_1.tsv', 'predictor_1_high_scored.tsv',
    'predictor_1_independent_validation.tsv', 'predictor_1_all_unused.tsv',
    'predictor_1_all_unused_pairs.tsv', 'predictor_2.tsv',
    'predictor_2_high_scored.tsv', 'predictor_2_independent_validation.tsv',
    'predictor_2_all_unused.tsv', 'predictor_2_all_unused_pairs.tsv'
]

#%% Choose the predictor
#'predictor_1', 'predictor_1hs', 'predictor_2', 'predictor_2hs'
predictor_id = 'predictor_2'
predictor_parameters = predictor.predictor_parameters_all[predictor_id]

###################################################################################################
#%% Train the predictor
pred = predictor.Predictor(
    predictor.predictor_parameters_all[predictor_id]['input_file'],
    path_to_data + input_folder, path_to_data + output_folder)
pred.logger.info('Done initializing predictor!')

pred.logger.info('Cross-validating predictor...')
clf, y_true_all, y_pred_all, y_true_all_perdrugpair, y_pred_all_perdrugpair, dfs_left_out = \
    pred.cross_validate_predictor(predictor_parameters['clf_options'], n_folds)

#%%
output_filename = path_to_data + output_folder + predictor_id + '.pickle'
pred.logger.info('Saving cross-validation results to ' + output_filename)
predictor_info = (
    clf,
    y_true_all,
    y_pred_all,
    y_true_all_perdrugpair,
Example #24
import predictor
import FGSM_utils
import utils
import sys

if __name__ == '__main__':
    PGM_NUM = 154
    pgms = [
        utils.read_PGM(f"../data/pgm/{i}.pgm") for i in range(1, PGM_NUM + 1)
    ]
    labels = utils.read_labels("../data/labels.txt", PGM_NUM)
    params = predictor.read_params("../data/param.txt")

    pred = predictor.Predictor(params)
    EPS = float(sys.argv[1])

    for i in range(PGM_NUM):
        x, raw_x = pgms[i]
        t = labels[i] - 1

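        # FGSM: perturb the raw image by EPS along the sign of the input gradient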
        pred.forward(x)
        dx = pred.backward(t)  # get the gradient vector
        sign = FGSM_utils.sign(dx)
        prx = FGSM_utils.perturb(raw_x, sign, EPS)

        utils.write_PGM(f"../data/FGSM_{EPS:.3f}/{i+1}.pgm", prx)
Example #25



#cpdir = './cc-predictor-model'
#cpdir = './modeldata'

parser = argparse.ArgumentParser(description='Do cloud coverage prediction on image')
parser.add_argument('--dirname', type=str, help='Input directory with images to do prediction on')
parser.add_argument('--modeldir', type=str, help='Model dir', default='modeldata')
parser.add_argument('--epoch', type=int, help='epoch', default=888)
parser.add_argument('--with-probs', action='store_true', help='output probabilities')
args = parser.parse_args()


predictor = predictor.Predictor(args.modeldir, args.epoch)


def calc_spread(vector):
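    # treat 'vector' as probability mass over the two bins at 0 and 0.5 and return its variance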
    i = np.array([0, 1]) / 2
    x = np.array(vector)
    mean = np.sum(x * i)
    variance = sum(i * i * x) - mean*mean
    return variance

if __name__ == "__main__":

    for filename in glob.iglob( args.dirname + '/**/*.jp*g', recursive=True):
    
        result = predictor.predict(filename)
        sys.stdout.write(filename + " ")
Example #26
def main(args):

    # training mode
    if args.train:
        logging.info("start training...")
        EG_predictor = predictor.Predictor(max_epochs=args.epoch)

        # load data and create a data set
        with open("./data/train.pkl", "rb") as f:
            train = pickle.load(f)

        logging.info("create a dataset...")
        train_dataset = dataset.Dataset(data=train)
        logging.info("start training!")
        EG_predictor.fit_dataset(data=train_dataset,
                                 collate_fn=train_dataset.collate_fn)

    # test mode
    if args.test:
        logging.info("start testing...")
        EG_predictor = predictor.Predictor(max_epochs=args.epoch)

        # choose the model to test data
        EG_predictor.load("model-40")

        # load data and create a data set
        with open("./data/test.pkl", 'rb') as f:
            test = pickle.load(f)

        logging.info("create a dataset...")
        test_dataset = dataset.Dataset(data=test)
        pre, ans = EG_predictor.predict_dataset(
            data=test_dataset, collate_fn=test_dataset.collate_fn)

        # calculate acc
        correct = 0
        for i in zip(pre, ans):
            pre_ans = 0
            # class number is 7
            for j in range(1, 7):
                if i[0][j] > i[0][pre_ans]:
                    pre_ans = j
            if pre_ans == i[1]:
                correct += 1

        print("correct: ", correct)
        print("acc: ", correct / len(ans))

    # label the raw ecg data
    if args.label:
        logging.info("start labeling...")
        EG_predictor = predictor.Predictor(max_epochs=args.epoch)
        EG_predictor.load("model-40")

        # load data and create a dataset
        path = "./ecg/"
        files = listdir(path)
        for file in files:
            if ".pkl" in file:
                with open(path + file, 'rb') as f:
                    test = pickle.load(f)
                    logging.info("create a dataset...")
                    test_dataset = dataset.Dataset(data=test)
                    pre, _ = EG_predictor.predict_dataset(
                        data=test_dataset, collate_fn=test_dataset.collate_fn)

                    with open("./pre/" + file.split('.')[0] + ".pkl",
                              "wb") as wf:
                        pickle.dump(pre, wf)
Example #27
          'Pre-computed transition relationship'.center(80, '#') + color.END)
    print(color.YELLOW + 'INFO: ' + color.END +
          'Generating scheduler environment...')
    tic = time.perf_counter()
    schedulerEnv = environment.SchedulerEnv(buffer_size, num_actions,
                                            num_block, utility)
    toc = time.perf_counter()
    print(color.GREEN + 'SUCCESS: ' + color.END + color.BLUE +
          '[{:0.4f} sec] '.format(toc - tic) + color.END +
          'Scheduler environment generated! Size: {}'.format(
              asizeof.asizeof(schedulerEnv)))
    print('Number of states: {}'.format(schedulerEnv.num_states))

    print(color.YELLOW + 'INFO: ' + color.END + 'Generating predictor...')
    tic = time.perf_counter()
    predictorMgm = predictor.Predictor(buffer_size, num_actions)
    toc = time.perf_counter()
    print(
        color.GREEN + 'SUCCESS: ' + color.END + color.BLUE +
        '[{:0.4f} sec] '.format(toc - tic) + color.END +
        'Predictor generated! Size: {}'.format(asizeof.asizeof(predictorMgm)))

    print(color.YELLOW + 'INFO: ' + color.END + 'Generating scheduler...')
    tic = time.perf_counter()
    schedulerAlg = scheduler.Scheduler()
    toc = time.perf_counter()
    print(
        color.GREEN + 'SUCCESS: ' + color.END + color.BLUE +
        '[{:0.4f} sec] '.format(toc - tic) + color.END +
        'Scheduler generated! Size: {}'.format(asizeof.asizeof(schedulerAlg)))
Example #28
def train(args, data, model_directory_path, *, logger=None):
    alphabet_file_path = model_directory_path + args.lang + '-' + args.resource + '-alphabet'

    if args.model_path is None:
        alph = alphabet.make_alphabet(alphabet_file_path, data, logger=logger)
        dim = alph.get_dimension()
        predictor_instance = predictor.Predictor(
            dim,
            dim,
            args.embedding_dim,
            args.hidden_dim,
            args.context_dim,
            optimization_method=args.optimizer,
            activation_method=args.activation)

        model = {
            'predictor': predictor_instance,
            'lang': args.lang,
            'resource': args.resource,
            'embedding_dim': args.embedding_dim,
            'hidden_dim': args.hidden_dim,
            'context_dim': args.context_dim,
            'optimizer': args.optimizer,
            'activation': args.activation,
            'interval': args.model_save_interval,
            'round': 0,
            'interval_loss_history': [],
            'clock_time_history': [],
            'total_loss': 0.0
        }
    else:
        model = pickle.load(open(args.model_path, 'rb'))
        alphabet_file_path = model_directory_path + model[
            'lang'] + '-' + model['resource'] + '-alphabet'
        alph = pickle.load(open(alphabet_file_path, 'rb'))
        dim = alph.get_dimension()

    ###
    # Train phase
    ###
    logger.debug('Start: format train_data')

    train_data = []
    for d in data:
        v = alph.create_index_vector(d)
        train_data.append(v)

    logger.debug('End: format train_data')

    ###
    # Fit
    ###

    clock_time_history = []

    current_loss = 0.0
    averaged_loss = 0.0
    interval_loss = 0.0

    logger.info('Training started')
    start = time.time()
    for epoch in range(args.max_iter):
        epoch_loss = 0.0

        for i in range(len(train_data)):
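            # one-hot encode the source and target index vectors for this sample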
            in_v = util.convert_to_one_hot(train_data[i][0], dim)
            out_v = util.convert_to_one_hot(train_data[i][1], dim)

            current_loss = model['predictor'].train(in_v, out_v)
            model['total_loss'] += current_loss
            epoch_loss += current_loss
            interval_loss += current_loss

            model['round'] += 1

            if (model['round'] % model['interval']) == 0:
                result = model['predictor'].predict(in_v)
                formatted_out_seq = alph.format_output(result)

                end = time.time()

                elapsed = end - start

                logger.debug('Period: %d' % (model['round']))
                logger.debug('Elapsed (s): %d' % elapsed)
                logger.debug(train_data[i][0])
                logger.debug(train_data[i][1])
                logger.debug(alph.format_output(out_v))
                logger.debug(formatted_out_seq)
                logger.debug(alph.serialize_output(formatted_out_seq))

                logger.info(
                    'Round %d finished. Loss incurred during this interval: %e'
                    % (model['round'], interval_loss))
                model['clock_time_history'].append(elapsed)
                model['interval_loss_history'].append(interval_loss)

                model_file_path = model_directory_path + args.lang + '-' + args.resource + '-' + str(
                    model['round']) + '-model'

                pickle.dump(model, open(model_file_path, 'wb'))

                interval_loss = 0.0
                start = time.time()

        logger.info('Epoch %d finished. Loss incurred during this epoch: %e' %
                    (epoch, epoch_loss))
Example #29
    'frozen_inference_graph_path',
    './model_pb/frozen_inference_graph_pb/' + 'frozen_inference_graph.pb',
    'Path to frozen inference graph.')
flags.DEFINE_string('images_dir', '/dataSets/testImgs/daisy',
                    'Path to images (directory).')
flags.DEFINE_string('output_path', './test_result.json',
                    'Path to output file.')

FLAGS = flags.FLAGS

if __name__ == '__main__':
    frozen_inference_graph_path = FLAGS.frozen_inference_graph_path
    images_dir = FLAGS.images_dir
    output_path = FLAGS.output_path

    model = predictor.Predictor(frozen_inference_graph_path)
    image_files = glob.glob(os.path.join(images_dir, '*.*'))
    val_results = []
    predicted_count = 0
    num_samples = len(image_files)
    for image_path in image_files:
        predicted_count += 1
        if predicted_count % 100 == 0:
            print('Predict {}/{}.'.format(predicted_count, num_samples))
        image_name = image_path.split('/')[-1]
        image = cv2.imread(image_path)
        if image is None:
            print('image %s does not exist.' % image_name)
            continue
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
Example #30
            'airport-barcelona-0-8-a.wav',
            'airport-barcelona-0-9-a.wav']

# Extract features
preprocessor = pred.FeatureExtractor(n_feats=n_feats,
                                     audio_preprocess=audio_preprocess,
                                     feature_type=feature_type)
features = preprocessor.process(audio_files)

# Fold and model list
fold_list = [0, 1, 2, 3, 4]
model_list = ['models_exp_augmentation/monaural_mid100_mixup_0.2_fold02018-11-13T18_39_30.699398.h5',
              'models_exp_augmentation/monaural_mid100_mixup_0.2_fold12018-11-13T21_00_50.589642.h5',
              'models_exp_augmentation/monaural_mid100_mixup_0.2_fold22018-11-13T22_18_05.513530.h5',
              'models_exp_augmentation/monaural_mid100_mixup_0.2_fold32018-11-13T23_22_19.095619.h5',
              'models_exp_augmentation/monaural_mid100_mixup_0.2_fold42018-11-14T02_45_53.091727.h5']

# fold_list = [1]
# model_list = ['models_exp_augmentation/monaural_mid100_mixup_0.2_fold12018-11-13T21_00_50.589642.h5']


# Predict features
predictor = pred.Predictor(zarr_root=zarr_root,
                           zarr_group=zarr_group, 
                           fold_list=fold_list, 
                           model_list=model_list)
predictions = predictor.predict(features)


print(predictions)