Example #1
    def run(self):
        model = Model()
        model.train()
         
        # Load the object stored as a pickled binary file
        file_name = '/home/pi/workspace/ai-contents-maze-runner/model/rf.pkl'
        rfmodel = joblib.load(file_name) 
        
        while True:
            time.sleep(0.01)
            ir1 = self.ir1.proximity
            ir2 = self.ir2.proximity
            if ir2 == 0:
                pass
            else:
                pred = rfmodel.predict([[ir1, ir2, ir1-ir2, ir1/ir2]])
                print("예측값 : ",pred)

                if pred[0] == 1:
                    self.left_fast()
                elif pred[0] == 2:
                    self.left_slow()
                elif pred[0] == 3:
                    self.straight()
                elif pred[0] == 4:
                    self.right_slow()
                elif pred[0] == 5:
                    self.right_fast()
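The example above only loads rf.pkl. As a hypothetical sketch (the training-data files, feature order and labels 1..5 are assumptions, not part of the original), such a pickled random-forest model could be produced with scikit-learn and joblib roughly like this:

import joblib
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Assumed training data: rows of [ir1, ir2, ir1 - ir2, ir1 / ir2] and direction labels 1..5
X = np.load('ir_features.npy')  # hypothetical feature file
y = np.load('ir_labels.npy')    # hypothetical label file

clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X, y)
joblib.dump(clf, '/home/pi/workspace/ai-contents-maze-runner/model/rf.pkl')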
Example #2
def straight_flames():
    import os
    os.chdir(os.path.dirname(os.path.abspath(__file__)))

    model = Model(n_characters, n_hidden, n_characters, n_layers)
    model.load("models/wtchrrnn.pt")

    return " ".join(model.generate("Add ", 40).split(" ")[:-1]).strip() + "."
Example #3
def get_gesture_model(weights_path):
    model = Model(42, 32, 5)
    if torch.cuda.is_available():
        model.load_state_dict(torch.load(weights_path))
        model = model.cuda()
    else:
        model.load_state_dict(
            torch.load(weights_path,
                       map_location=lambda storage, loc: storage))
    model.eval()
    return model
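A minimal usage sketch for the function above (the weights path, the 42-feature input and the argmax decoding are assumptions inferred from Model(42, 32, 5), not confirmed by the original):

import torch

model = get_gesture_model('weights/gesture.pt')  # hypothetical path
features = torch.randn(1, 42)                    # one sample with 42 assumed features
if torch.cuda.is_available():
    features = features.cuda()                   # the model lives on the GPU in that case
with torch.no_grad():
    scores = model(features)
print(scores.argmax(dim=1).item())               # predicted gesture class index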
Example #4
    def __init__(self,
                 label=None,
                 role='agent',
                 debug=1,
                 tfbdc=1,
                 ngram=(1, 2)):
        '''
        :param label: str, the category to classify
        :param role: str, in ['agent', 'all', 'user'], default('agent')
            'agent' extracts only the agent's utterances, 'all' extracts the full dialogue data, 'user' extracts only the user's utterances
        :param debug: bool, default True
            True enables developer mode, i.e. intermediate results are not read directly from files
        '''
        self.label, self.role, self.debug = label, role, debug
        self.tfbdc = tfbdc
        self.mymodel = Model(label=label)
Example #5
def debug(args):

    # prepare predictor
    sess_init = SaverRestore(args.model_path)
    model = Model('train')
    predict_config = PredictConfig(
        session_init=sess_init,
        model=model,
        input_names=['imgs', 'gt_heatmaps', 'gt_pafs', 'mask'],
        output_names=['vgg_features', 'HeatMaps', 'PAFs', 'cost'])
    predict_func = OfflinePredictor(predict_config)

    ds = Data('train', False)

    g = ds.get_data()
    sample = next(g)

    import pdb
    pdb.set_trace()

    sample = [np.expand_dims(e, axis=0) for e in sample]

    vgg_features, heatmap, paf, cost = predict_func(sample)

    import pdb
    pdb.set_trace()
Example #6
def predict(args):
    sess_init = SaverRestore(args.model)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["input"],
                                   output_names=["softmax_output"])

    predict_func = OfflinePredictor(predict_config)

    if os.path.isfile(args.input):
        # input is a file
        newPredict_one(args.input, predict_func, args.output or "output", args.crf)

    if os.path.isdir(args.input):
        # input is a directory
        output_dir = args.output or "output"
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        for (dirpath, dirnames, filenames) in os.walk(args.input):
            logger.info("Number of images to predict is " + str(len(filenames)) + ".")
            for file_idx, filename in enumerate(filenames):
                if file_idx % 10 == 0 and file_idx > 0:
                    logger.info(str(file_idx) + "/" + str(len(filenames)))
                filepath = os.path.join(args.input, filename)
                newPredict_one(filepath, predict_func, output_dir, args.crf)
Example #7
def codeMaker():
    global count, codes
    count += 1

    codes[str(count)] = Model()

    return json.dumps({'code': count})
Example #8
def resume(model_path):
    model = Model()
    if model_path:
        model.load_state(model_path)

    for i in range(n_epochs):
        model.train()

        if model.epoch % eval_per_epoch == 0:
            model.eval()
Example #9
def main():
    opt = parse_args()
    print(opt)
    config = opt['config_file'][0]
    mat_dir = opt['mat_dir'][0]
    storage_dir = opt['storage_dir'][0]

    M = Model(storage_dir, config)
    # Load training data if not already done
    if not os.listdir(os.path.join(storage_dir, 'training', 'data')):
        M.loader.load_mat_folder(os.path.join(mat_dir, 'train', 'data'),
                                 'training', 'data')
    else:
        path = os.path.join(storage_dir, 'training', 'data')
        print(f'{path} already contains data, skipping load.')

    # Load validation data if not already done
    if not os.listdir(os.path.join(storage_dir, 'validation', 'data')):
        M.loader.load_mat_folder(os.path.join(mat_dir, 'train', 'data'),
                                 'validation', 'data')
    else:
        path = os.path.join(storage_dir, 'validation', 'data')
        print(f'{path} already contains data, skipping load.')

    history = M.fit_model()
    M.illustrate_history(history)
    M.print_img()
Example #10
def get_pred_func(args):
    sess_init = SaverRestore(args.model_path)
    model = Model(args.net_format)
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["input", "label"],
                                   output_names=["LABELS", "BBOXS"])
    predict_func = OfflinePredictor(predict_config)
    return predict_func
Example #11
def get_pred_func(args):
    sess_init = SaverRestore(args.model_path)
    predict_config = PredictConfig(session_init=sess_init,
                                   model=Model(),
                                   input_names=["img_input"],
                                   output_names=["network_input", "img_pred"])

    predict_func = OfflinePredictor(predict_config)
    return predict_func
Example #12
def predict(args):
    sess_init = SaverRestore(args.model_path)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["feat", "seqlen"],
                                   output_names=["logits"])

    predict_func = OfflinePredictor(predict_config)

    err_num = 0
    tot_num = 0
    if args.input_path is not None and os.path.isfile(args.input_path):
        # input is a file
        result = predict_one(args.input_path, predict_func, None)
        print(result)
        label_filename = args.input_path.replace("png", "txt")
        if os.path.isfile(label_filename):
            with open(label_filename) as label_file:
                content = label_file.readlines()
                target = content[0]
            (cur_err, cur_len) = sequence_error_stat(target, result)
            err_num = err_num + cur_err
            tot_num = tot_num + cur_len
        else:
            with open(label_filename, 'w') as f:
                f.write(result)
    if args.test_path is not None and os.path.isfile(args.test_path):
        # input is a text file
        with open(args.test_path) as f:
            content = f.readlines()

        lines = [e.strip() for e in content]

        for idx, input_path in enumerate(lines):
            if idx > 0 and idx % 1000 == 0:
                logger.info(str(idx) + "/" + str(len(lines)))
            result = predict_one(input_path, predict_func, idx + 1)
            print(result)
            ext = input_path.split('.')[1]
            label_filename = input_path.replace(ext, "txt")
            if os.path.isfile(label_filename):
                with open(label_filename) as label_file:
                    content = label_file.readlines()
                    target = content[0]
                (cur_err, cur_len) = sequence_error_stat(target, result)
                if cur_err > 0:
                    logger.info(input_path)
                    logger.info(target)
                    logger.info(result)
                err_num = err_num + cur_err
                tot_num = tot_num + cur_len

    if tot_num > 0:
        logger.info("Character error rate is: " + str(err_num) + "/" + str(tot_num) + "(" + str(err_num * 1.0 / tot_num) + ")")
Example #13
def initialize(model_path):
    # prepare predictor
    sess_init = SaverRestore(model_path)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=['imgs'],
                                   output_names=['heatmaps'])  # n h w c
    predict_func = OfflinePredictor(predict_config)
    return predict_func
Example #14
class Predictor:

    model = Model()

    def predict(self):

        clf_naive_bayes = self.model.naive_bayes()
        test_pred = clf_naive_bayes.predict(test_set)

        print(test_pred)
Example #15
def get_pred_func(args):
    sess_init = SaverRestore(args.model_path)
    model = Model(args.data_format)
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["input", "spec_mask"],
                                   output_names=["pred_x", "pred_y", "pred_w", "pred_h", "pred_conf", "pred_prob"])

    predict_func = OfflinePredictor(predict_config) 
    return predict_func
Example #16
def get_pred_func(args):
    sess_init = SaverRestore(args.model_path)
    model = Model(1)
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["exemplar_img", "search_img"],
                                   output_names=["prediction"])

    predict_func = OfflinePredictor(predict_config)
    return predict_func
Example #17
def predict(args):
    sess_init = SaverRestore(args.model)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=['input'],
                                   output_names=['output'])

    predict_func = OfflinePredictor(predict_config)

    return predict_func
Example #18
def predict(args):
	# get only the grayscale image that is stored in the blue channel

	img = Image.open(args.image_path)
	if img.mode == 'RGB':
		print("Only taking blue channel from RGB image")
		img = img.split()[2]
	elif img.mode == 'L':
		print("Provided image is already grayscale")

	
	transform = transforms.Compose([transforms.Resize(256), transforms.ToTensor(),
	                                transforms.Normalize(mean=[0.485], std=[0.229])])
	
	# img = img.split()[2]
	img_t = transform(img)
	batch_t = torch.unsqueeze(img_t, 0)
	
	model = Model()
	
	checkpoint = torch.load(args.model_weights)
	model.load_state_dict(checkpoint['state_dict'])
	
	model.eval()
	out = model(batch_t)
	_, preds = torch.max(out, 1)
	
	print('Result: ', args.all_labels[preds.item()])
Example #19
def predict_masks(args, hps, store, to_predict: List[str], threshold: float,
                  validation: str=None, no_edges: bool=False):
    logger.info('Predicting {} masks: {}'
                .format(len(to_predict), ', '.join(sorted(to_predict))))
    model = Model(hps=hps)
    if args.model_path:
        model.restore_snapshot(args.model_path)
    else:
        model.restore_last_snapshot(args.logdir)

    def load_im(im_id):
        data = model.preprocess_image(utils.load_image(im_id))
        if hps.n_channels != data.shape[0]:
            data = data[:hps.n_channels]
        if validation == 'square':
            data = square(data, hps)
        return Image(id=im_id, data=data)

    def predict_mask(im):
        logger.info(im.id)
        return im, model.predict_image_mask(im.data, no_edges=no_edges)

    im_masks = map(predict_mask, utils.imap_fixed_output_buffer(
        load_im, sorted(to_predict), threads=2))

    for im, mask in utils.imap_fixed_output_buffer(
            lambda _: next(im_masks), to_predict, threads=1):
        assert mask.shape[1:] == im.data.shape[1:]
        with gzip.open(str(mask_path(store, im.id)), 'wb') as f:
            # TODO - maybe do (mask * 20).astype(np.uint8)
            np.save(f, mask >= threshold)
Example #20
def eval_model():
    """
    Evaluate the model
    :return:
    """
    # Load graph
    g = Model(mode="eval")
    print("Evaluation Graph loaded")

    # Load data
    fpaths, text_lengths, texts = input_load(mode="eval")

    # Parse
    text = np.fromstring(texts[0], np.int32)
    fname, mel, mag = create_spectrograms(fpaths[0])

    # Inputs
    text = np.expand_dims(text, 0)
    mels = np.expand_dims(mel, 0)
    mags = np.expand_dims(mag, 0)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # restore the model
        saver = tf.train.import_meta_graph(os.path.join(LOG_DIR, MODEL_NAME))
        saver.restore(sess, tf.train.latest_checkpoint(LOG_DIR))
        print("Restored!")

        writer = tf.summary.FileWriter(LOG_DIR, sess.graph)

        # Feed Forward
        # mels
        print("Running session...")
        mels_hat = np.zeros((1, mels.shape[1], mels.shape[2]), np.float32)
        for i in tqdm(range(mels.shape[1])):
            _mels_hat = sess.run(g.mel_hat, {g.txt: text, g.mels: mels_hat})
            mels_hat[:, i, :] = _mels_hat[:, i, :]

        # mags
        print("Generating summaries...")
        merged, gs = sess.run([g.merged, g.global_step], {
            g.txt: text,
            g.mels: mels,
            g.mel_hat: mels_hat,
            g.mags: mags
        })

        # summary
        writer.add_summary(merged, global_step=gs)
        writer.close()
Example #21
def predict(args):
    sess_init = SaverRestore(args.model_path)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=['lr_imgs'],
                                   output_names=[
                                        'flows',
                                        'after_warp',
                                        'predictions'
                                    ])
    predict_func = OfflinePredictor(predict_config)

    predict_one(args.img_path, args.ref_path, predict_func)
Example #22
def main():
	print("Abhishek Kumar")
	print("15648")
	print("CSA")

	parser = argparse.ArgumentParser()
	parser.add_argument("--test-data", default='test_input.txt', type=str, help="Name of the test file")
	args = parser.parse_args()
	
	model_path='model/model.bin'
	model = Model(h=4)
	device = "cpu"
	model.to(device)
	eval_dataset,w,data = read_test_data(args.test_data)
	model.load_state_dict(torch.load(model_path))
	evaluate(model, eval_dataset,device,w,data)
Example #23
def main(_):

    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=False)
    tfconfig.gpu_options.allow_growth = True
    with tf.compat.v1.Session(config=tfconfig) as sess:
        model = Model(sess, args)

        if args.phase == 'train':
            print('Train')
            model.train(args)

        if args.phase == 'inference' or args.phase == 'test':
            print("Inference.")
            model.inference(args.inference_images_dir,
                            to_save_dir=args.save_dir)
        sess.close()
    print('FINISHED')
Example #24
def run_feature_selection(df, out_dir, target_col="y", n_cols=300):
    """Run xfeat_feature_selection()."""
    # target encoding
    df = df[df[target_col].notnull()]
    (X_train, X_test, y_train, y_test) = train_test_split(df.drop(target_col,
                                                                  axis=1),
                                                          df[target_col],
                                                          test_size=0.1,
                                                          random_state=71)
    train_df = pd.concat([X_train, y_train], axis=1)
    valid_df = pd.concat([X_test, y_test], axis=1)
    train_df, _ = Model()._encoding(train_df, valid_df, target_col)
    print(train_df.shape)

    # Keep the top columns by feature_importance: columns kept = total columns * threshold
    threshold = n_cols / train_df.shape[1]
    params = {
        "metric": "binary_logloss",
        "objective": "binary",
        "threshold": threshold,
    }  # metric=roc_auc also works
    select_df = FeatureSelect().xfeat_feature_selection(
        train_df, target_col, params)
    print(select_df.shape)
    print(select_df.columns)

    # Keep the selected column names
    feature_selections = sorted(select_df.columns.to_list())
    feature_selections.append(target_col)
    pd.DataFrame({
        "feature_selections": feature_selections
    }).to_csv(f"{out_dir}/feature_selection_cols.csv", index=False)

    not_feature_selections = list(
        set(train_df.columns.to_list()) - set(feature_selections))
    pd.DataFrame({
        "not_feature_selections": not_feature_selections
    }).to_csv(f"{out_dir}/not_feature_selection_cols.csv", index=False)
Example #25
def run(train_size, test_size, dataset_pickle_filepath,
        output_pickle_filepath):
    dataset = load_dataset('./data', 1, dataset_pickle_filepath)
    dataset = dataset[:train_size + test_size]
    train_dataset = dataset[:train_size]
    test_dataset = dataset[train_size:]
    print('Dataset loaded successfully, total {}'.format(len(dataset)))
    word_set = create_word_set(train_dataset)
    print('Word set created successfully, length', len(word_set), ', building word vectors')
    train_vectors, train_labels = dataset2vectors(word_set, train_dataset)
    test_vectors, test_labels = dataset2vectors(word_set, test_dataset)
    print('Word vectors created successfully')
    # train_vectors, train_labels, test_vectors, test_labels = split_vectors_by_ratio(vectors, labels, train_size, test_size)
    print('Dataset split successfully, training set {}×{}, test set {}×{}'.format(
        train_vectors.shape[0], train_vectors.shape[1],
        test_vectors.shape[0], test_vectors.shape[1]))
    print('Start training, training set {}×{}'.format(train_vectors.shape[0],
                                                       train_vectors.shape[1]))
    Pspam, Pham, PS, PH = train_nb(train_vectors, train_labels, word_set)
    print('Start testing, test set {}×{}'.format(test_vectors.shape[0],
                                                 test_vectors.shape[1]))
    predict_vector, probabilities = predict_nb(test_vectors, Pspam, Pham, PS,
                                               PH)
    accuracy = calc_accuracy(predict_vector, test_labels)
    precision = calc_precision(predict_vector, test_labels)
    recall = calc_recall(predict_vector, test_labels)
    print('Testing finished, accuracy {}, precision {}, recall {}%'.format(accuracy, precision, recall))

    # Avoid the __main__.Model pickling problem
    # https://stackoverflow.com/questions/40287657/load-pickled-object-in-different-file-attribute-error
    from train import Model
    output = Model(word_set, Pspam, Pham, PS, PH, datetime.now())
    with open(output_pickle_filepath, 'wb') as f:
        pickle.dump(output, f)
    return word_set, accuracy, precision, recall
Example #26
# -*- coding: utf-8 -*-

import cv2
import sys
import gc
from train import Model

if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)

    # Load the model
    model = Model()
    model.load_model(file_path='../model/face.model.h5')

    # Color of the rectangle used to frame detected faces
    color = (0, 255, 0)

    # Capture the live video stream from the specified camera
    cap = cv2.VideoCapture(0)

    # Local path of the face detection cascade classifier
    # cascade_path = "haarcascade_frontalface_alt2.xml"

    # Loop: detect and recognize faces
    while True:
        ret, frame = cap.read()  # read one video frame

        if ret is True:
Example #27
    def train(self):
        model = Model()
        model.train()
Example #28
def predict(args):

    sess_init = SaverRestore(args.model)
    model = Model()
    predict_config = PredictConfig(session_init=sess_init,
                                   model=model,
                                   input_names=["input"],
                                   output_names=["NETWORK_OUTPUT"])

    predict_func = OfflinePredictor(predict_config)
    if os.path.isdir(args.input):
        # this is a directory, should have subdirectories, whose names are numbers and contents are images
        samples = []
        class_dirs = os.listdir(args.input)
        for class_dir in class_dirs:
            klass = int(class_dir)
            class_dir_path = os.path.join(args.input, class_dir)
            for filename in os.listdir(class_dir_path):
                samples.append([klass, os.path.join(class_dir_path, filename)])
        # for (dirpath, dirname, filenames) in os.walk(args.input):
        #     klass = dirpath[len(args.input):]
        #     if klass == "":
        #         continue
        #     for filename in filenames:
        #         samples.append([int(klass), os.path.join(dirpath, filename)])
        wrong = []
        for (idx, sample) in enumerate(samples):
            klass = sample[0]
            img_path = sample[1]
            img = misc.imread(img_path)
            if args.crop:
                height, width, _ = img.shape
                y_start = int((height - cfg.img_size) / 2)
                x_start = int((width - cfg.img_size) / 2)
                img = img[y_start:y_start + cfg.img_size,
                          x_start:x_start + cfg.img_size]
            img = np.expand_dims(img, axis=0)

            predictions = predict_func([img])[0]
            result = np.argmax(predictions)
            if klass != result:
                wrong.append([img_path, result])
            if idx % 300 == 299:
                logger.info(str(idx + 1) + "/" + str(len(samples)))
        logger.info("Total number is: " + str(len(samples)))
        logger.info("Error number is: " + str(len(wrong)))
        for wrong_sample in wrong:
            logger.warn("Wrong: " + wrong_sample[0] + ", predicted class is " +
                        str(wrong_sample[1]))

    else:
        # this is a file
        if args.input.endswith(".txt"):
            # this is a txt file
            with open(args.input) as f:
                records = f.readlines()
            records = [e.strip().split(' ') for e in records]
            img_paths = [e[0] for e in records]
            classes = [int(e[1]) for e in records]

            error_stat = np.zeros(cfg.class_num)
            error_detail = {}
            tot_stat = np.zeros(cfg.class_num)

            for img_idx, img_path in enumerate(img_paths):
                if img_idx > 0 and img_idx % 100 == 0:
                    print(img_idx)
                img = misc.imread(img_path)
                if args.crop:
                    height, width, _ = img.shape
                    y_start = int((height - cfg.img_size) / 2)
                    x_start = int((width - cfg.img_size) / 2)
                    img = img[y_start:y_start + cfg.img_size,
                              x_start:x_start + cfg.img_size]
                img = np.expand_dims(img, axis=0)
                predictions = predict_func([img])[0]
                result = np.argmax(predictions)

                tot_stat[classes[img_idx]] += 1
                if classes[img_idx] != result:
                    error_stat[classes[img_idx]] += 1
                    if classes[img_idx] not in error_detail.keys():
                        error_detail[classes[img_idx]] = []
                    error_detail[classes[img_idx]].append(result)
            pdb.set_trace()

        else:
            # this should be an image file
            img = misc.imread(args.input)
            if args.crop:
                height, width, _ = img.shape
                y_start = int((height - cfg.img_size) / 2)
                x_start = int((width - cfg.img_size) / 2)
                img = img[y_start:y_start + cfg.img_size,
                          x_start:x_start + cfg.img_size]
            img = np.expand_dims(img, axis=0)

            predictions = predict_func([img])[0]
            result = np.argmax(predictions)

            sort_pred = np.sort(predictions)
            sort_idx = np.argsort(predictions)

            logger.info(result)
Example #29
    msg = msg + ','

    if centerX <= screenCenterX:
        msg = msg + 'LEFT'
    elif centerX > screenCenterX:
        msg = msg + 'RIGHT'
    return msg

if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)

    # Load the model
    model = Model()
    model.load_model(file_path='/Users/limeng/Pictures/opencv/lm_face_model.h5')

    # Color of the rectangle used to frame detected faces
    color = (255, 255, 255)

    # Capture the live video stream from the specified camera
    cap = cv2.VideoCapture(0)

    # Local path of the face detection cascade classifier
    cascade_path = "/Users/limeng/code/python/Face_Recog/haarcascade_frontalface_default.xml"
    # Load the face detection cascade classifier
    cascade = cv2.CascadeClassifier(cascade_path)
    # Loop: detect and recognize faces
    while True:
        ret, frame = cap.read()  # read one video frame
Example #30
    if y > CropPadding: y = y - CropPadding
    else: y = 0
    h += 2 * CropPadding
    if x > CropPadding: x = x - CropPadding
    else: x = 0
    w += 2 * CropPadding
    return [x, y, w, h]


if __name__ == '__main__':
    # Change working directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    cap = cv2.VideoCapture(0)

    model = Model()
    model.load()

    # Get Cascade Classifier
    cascade = cv2.CascadeClassifier(cascade_path)

    isme = 0
    notme = 0

    nDelay = 0

    # Run window in other thread
    cv2.startWindowThread()

    while True:
        _, frame = cap.read()