Example #1
File: test_cup.py  Project: megumigachi/CUP
    def _test_infer(self):
        infer_args = {
            '--cuda': True,
            '--model-class': self.args['--model-class'],
            '--seed': "0",
            '--beam-size': "2",
            '--max-dec-step': "50",
            '--beam-class': "models.beam.Beam",
            '--model-type': "generator",
            '--batch-size': 3,
            'MODEL_PATH': os.path.join(self.args['--save-to']),
            'TEST_SET_FILE': os.path.join(self.dataset_prefix, "test.jsonl"),
            'OUTPUT_FILE': os.path.join(self.dataset_prefix, "result.json")
        }
        infer = Infer(infer_args)
        hypos = infer.infer()
        return hypos
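A minimal sketch of how this helper might be exercised from a unittest-style test method; the test name, the assertion, and the assumption that infer() returns a non-empty list of hypotheses are illustrative only, not taken from the CUP project:

    def test_infer_returns_hypotheses(self):
        hypos = self._test_infer()
        # Hypothetical check: infer() is assumed to return one hypothesis list per example.
        self.assertTrue(len(hypos) > 0)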
Example #2
import glob
import os

import cv2
import numpy as np
from PIL import ImageGrab

from infer import Infer  # assuming the same module layout as the other examples

# Data paths
data_path = "trainPart/dataset/"
trainList = './trainPart/train_data.txt'
testList = './trainPart/test_data.txt'
readPath = "./trainPart/dataset/IMG/"
savePath = "./trainPart/dataset/IMG_Seg/"
if not os.path.exists(savePath):
    os.mkdir(savePath)
# Fraction of the data used as the training set
ratio = 0.8

window = ImageGrab.grab()  # grab the current screen and record the window size
imm = cv2.cvtColor(np.array(window), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format
width, height = window.size
inf = Infer(width, height, imm)
inf.infer_pictures(readPath, savePath)

# Read the log file (read-only)
with open(data_path + "log.txt", 'r') as logFile:
    _list = logFile.readlines()

    # Check that the number of images matches the number of log entries
    ls_imgs = glob.glob(data_path + 'IMG/*.jpg')
    print(len(ls_imgs))
    print(len(_list))
    assert len(ls_imgs) == len(_list), 'number of images does not match'

    if os.path.exists(trainList):
        os.remove(trainList)
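The snippet is cut off before the actual split; below is a minimal sketch of how the 80/20 split implied by ratio might be written, assuming one sample per line in the log file and that trainList/testList simply receive the raw lines (all assumptions, not from the original project):

    # Hypothetical continuation: split the log lines into train and test files.
    split_idx = int(len(_list) * ratio)
    with open(trainList, 'w') as trainFile:
        trainFile.writelines(_list[:split_idx])
    with open(testList, 'w') as testFile:
        testFile.writelines(_list[split_idx:])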
Example #3
from infer import Infer
from config import ModelBasic, TrainBasic

infer = Infer(corpus_name=TrainBasic.dataset,
              run_name=TrainBasic.runname,
              sample_num=TrainBasic.batch_size,
              sample_dim=ModelBasic.in_out_dim)

infer.generate(0, 0)
Example #4
import time

import cv2
import numpy as np
import pyvjoy
import paddle.fluid as fluid
from PIL import ImageGrab

from infer import Infer

rect = (678, 350, 1078, 550)
j = pyvjoy.VJoyDevice(1)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
[infer_program, feeded_var_names,
 target_var] = fluid.io.load_inference_model(dirname="./model_infer/",
                                             executor=exe)

window = ImageGrab.grab()  # grab the current screen and record the window size
img = cv2.cvtColor(np.array(window), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format
width, height = window.size
inf = Infer(width, height, img)

segFlag = False  # whether to process segmented images


def control(ang, brake):
    if ang > 60:
        ang = 60
    if ang < -60:
        ang = -60
    global j
    x = ang / 180 + 0.5
    j.data.wAxisX = int(x * 32767)
    j.data.wAxisY = int(brake * 32767)
    j.data.wAxisZ = 0
    j.update()  # push the axis values to the virtual joystick (cf. Example #8)
Example #5
    def evaluate(self, epoch, step):
        infer = Infer(corpus_name=self.corpus_name, run_name=self.run_name,
                      sample_num=TrainBasic.eval_size, sample_dim=ModelBasic.in_out_dim)

        infer.generate(epoch, step)
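A hypothetical call site for evaluate() inside a training loop; num_epochs, eval_interval, train_loader, and train_step are illustrative names, not taken from the project:

    def train(self):
        for epoch in range(TrainBasic.num_epochs):            # assumed config field
            for step, batch in enumerate(self.train_loader):  # assumed data loader
                self.train_step(batch)                        # assumed single optimization step
                if step % TrainBasic.eval_interval == 0:      # assumed config field
                    self.evaluate(epoch, step)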
Example #6
    args.feature_shape = tuple([int(x) for x in args.feature_shape.split(',')])
    args.record_shape = tuple([int(x) for x in args.record_shape.split(',')])

    task = args.__dict__['crohns_or_polyps']

    if args.pytorch:
        trainer = PytorchTrainer(args)
        trainer.train()
    else:

        # if task == 'Polyps_CT':
        #     decode_record = generate_decode_function(args.record_shape, 'image')
        #     model = VGG
        if task == 'Crohns_MRI':
            decode_record = generate_decode_function(args.record_shape,
                                                     'axial_t2')
            model = ResNet3D
        args.__dict__['decode_record'] = decode_record

        if args.mode == 'train':
            trainer = Trainer(args, model)
            trainer.train()
        elif args.mode == 'test':
            infer = Infer(args, model)
            infer.test(os.path.join(args.base, args.test_datapath))

            axial_path = '/vol/bitbucket/rh2515/MRI_Crohns/A/A36 Axial T2.nii'
            coords = [198, 134, 31]
            infer.infer(axial_path, coords, args.record_shape,
                        args.feature_shape)
Example #7
            # model_path_list.append(os.path.join(directory, filename))
            model_path_list.append(filename)
    print(model_path_list)
    model_path_list.sort(key=lambda x: int(x.split('_')[2]))  # sort by the integer in the third underscore-separated field of the checkpoint filename
    
    # model_path_list.sort()
    for i in range(len(model_path_list)):
        if i < 29 or i > 49:
            continue
        if i % opts.span == opts.span-1:
        # if i > -1:

            print(model_path_list[i])

            model_state = torch.load(os.path.join(directory, model_path_list[i]))
            Model.load_state_dict(model_state)
            Model.to("cuda")
            summary(Model, (3, 64, 64))
            break  # NOTE: this break exits the loop here, so the scene-change code below never runs
            dataset = Dataload(imgpath=opts.image_folder, csv_name=csv_name)
            with torch.no_grad():
                print("infer")
                kld_list = Infer(Model, dataset, batch_size=opts.bs, latent_dim=opts.latent_dim, output_folder=opts.model_folder)

            index_of_change = linearSearch(kld_list, opts.top_K)  

            np.save(os.path.join(opts.model_folder, 'scene_change_res' + str(i) + '.npy'), index_of_change)
       


Example #8
import time

segFlag = False  # whether to process segmented images
rect = (678, 350, 1078, 550)
j = pyvjoy.VJoyDevice(1)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
[infer_program, feeded_var_names,
 target_var] = fluid.io.load_inference_model(dirname="./model_infer/",
                                             executor=exe)

window = ImageGrab.grab()  # grab the current screen and record the window size
img = cv2.cvtColor(np.array(window), cv2.COLOR_RGB2BGR)  # convert to OpenCV's BGR format
width, height = window.size
inf = Infer(width, height, img) if segFlag else None


def control(ang, brake):
    if ang > 60:
        ang = 60
    if ang < -60:
        ang = -60
    global j
    x = ang / 180 + 0.5
    j.data.wAxisX = int(x * 32767)
    j.data.wAxisY = int(0.2 * 32767)
    j.data.wAxisZ = 0
    j.update()
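
Neither this example nor Example #4 shows the driving loop itself; below is a minimal sketch of how the loaded inference program might feed control(), assuming the model takes a single preprocessed frame and its first output is the steering angle (input size, preprocessing, and output interpretation are all assumptions):

# Hypothetical main loop: grab the region of interest, run the Paddle
# inference program, and steer the virtual joystick with the predicted angle.
while True:
    frame = ImageGrab.grab(rect)
    x = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
    x = cv2.resize(x, (200, 66)).astype('float32') / 255.0          # assumed input size
    feed = {feeded_var_names[0]: x.transpose(2, 0, 1)[np.newaxis]}  # assumed NCHW layout
    out = exe.run(infer_program, feed=feed, fetch_list=target_var)
    ang = float(out[0][0])  # assumes the first output is the steering angle in degrees
    control(ang, 0.2)
    time.sleep(0.05)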

Example #9
    print("Running with arguments: ")
    for a in args.__dict__:
        print(str(a) + ": " + str(args.__dict__[a]))
    args.attention = int(args.attention)
    # args.localisation = int(args.localisation)
    # args.mixedAttention = int(args.mixedAttention)
    # args.deeper = int(args.deeper)

    args.feature_shape = tuple([int(x) for x in args.feature_shape.split(',')])
    args.record_shape = tuple([int(x) for x in args.record_shape.split(',')])

    task = args.__dict__['crohns_or_polyps']
    # if task == 'Polyps_CT':
    #     decode_record = generate_decode_function(args.record_shape, 'image')
    #     model = VGG
    if task == 'Crohns_MRI':
        decode_record = generate_decode_function(args.record_shape, 'axial_t2')
        model = ResNet3D
    args.__dict__['decode_record'] = decode_record

    if args.mode == 'train':
        trainer = Trainer(args, model)
        trainer.train()
    elif args.mode == 'test':
        infer = Infer(args, model)
        # the following are hard-coded examples (change to run inference on other images)
        axial_path = './examples/A1 Axial T2.nii'
        coords = [198, 134, 31]

        infer.infer(axial_path, coords, args.record_shape, args.feature_shape)
Example #10
import itertools
import json

config_file = json.load(open('./config.json'))
num_inferences = 1  # number of iterations for each configuration

for _ in range(num_inferences):
    for c in itertools.product(
            config_file['n_layers'], config_file['n_filters'],
            config_file['batch_size'], config_file['input_size'],
            config_file['n_classes'], config_file['kernel_size'],
            config_file['fc_units'], config_file['kernel_stride']):
        config = {
            'n_layers': c[0],
            'n_filters': c[1],
            'batch_size': c[2],
            'input_size': c[3],
            'n_classes': c[4],
            'kernel_size': c[5],
            'fc_units': c[6],
            'kernel_stride': c[7]
        }
        print(config)
        sl = Infer(config)
        sl.get_data()
        sl.model()
        sl.loss()
        sl.optimizer()
        sl.infer()
        del sl
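The number of runs is the product of the option-list lengths in config.json; a quick way to count the grid before launching the sweep (math.prod requires Python 3.8+, and the key names are taken from the loop above):

import math

grid_keys = ('n_layers', 'n_filters', 'batch_size', 'input_size',
             'n_classes', 'kernel_size', 'fc_units', 'kernel_stride')
n_configs = math.prod(len(config_file[k]) for k in grid_keys)
print('configurations to evaluate:', n_configs * num_inferences)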
Example #11
File: kb.py  Project: pranavrc/infer
#!/usr/bin/env python

''' Testing Infer on HF Support KB. '''

from infer import Infer

if __name__ == "__main__":
    kb_topics = [topic.rstrip('\n') for topic in open('./kb.txt')]
    stoplist = set('for a of the and to in is are to how do can I ?'.split())
    infer = Infer()
    infer.build(kb_topics, stoplist, update=False, num_topics=len(kb_topics))
    
    sims = infer.infer("How do I subscribe to a ticket")
    print(kb_topics[sims[0][0]])
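The example prints only the single best topic; judging by the sims[0][0] indexing, sims is presumably a ranked list of (topic_index, similarity) pairs (an assumption), in which case the top matches could be inspected like this:

    # Hypothetical: show the three best-matching KB topics with their scores.
    for topic_idx, score in sims[:3]:
        print('%.3f  %s' % (score, kb_topics[topic_idx]))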
Example #12
num_inferences = 1  # number of iterations for each configuration
store = True

count = 0
for c in itertools.product(config_file['n_layers'], config_file['n_filters'], config_file['batch_size'],
                           config_file['input_size'], config_file['n_classes'], config_file['kernel_size'],
                           config_file['fc_units'], config_file['kernel_stride']):
    print('configuration', count)
    count += 1
    for i in range(num_inferences):
        print(i)
        config = {
            'n_layers': c[0],
            'n_filters': c[1],
            'batch_size': c[2],
            'input_size': c[3],
            'n_classes': c[4],
            'kernel_size': c[5],
            'fc_units': c[6],
            'kernel_stride': c[7]
        }
        print(config)
        sl = Infer(config)
        sl.get_data()
        sl.model()
        sl.loss()
        sl.optimizer()
        model_complexity = sl.infer()
        model_complexity['config'] = config
        if store:
            collection.insert(model_complexity)
        del sl
Example #13
    result_file_name = image_file + '.txt'
    with codecs.open(result_file_name, 'w', encoding='utf-8') as f:
        for i, image_name in enumerate(image_name_list):
            image_path = os.path.join(image_file, image_name)
            try:
                image = Image.open(image_path)
                predict_text = ocr_engine.predict(image, long_info=False)
            except Exception:  # fall back to an empty prediction if OCR fails on this image
                predict_text = ''
            print(image_path)
            print(predict_text)
            f.write('{}\t{}\n'.format(image_path, predict_text))
            f.flush()


ocr_engine = Infer('/home/huluwa/tf_crnn/model/ctc_center')
if __name__ == "__main__":
    TEST_OCR_MODEL = False
    TEST_BATCH_OCR_MODEL = True
    if TEST_OCR_MODEL:
        root_dir = './data_example/test_data/xingjin'
        gt_file = './data_example/test_data/xingjin1'
        report_file = './testset_result_local.txt'
        start_time = time.time()
        test_ocr_model(root_dir, gt_file, report_file)
        print('total cost time is %.4f ms' % ((time.time() - start_time) * 1000))
        exit()
    if TEST_BATCH_OCR_MODEL:
        root_dir = './data_example/test_data/xingjin'
        gt_file = './data_example/test_data/xingjin1'
        report_file = './testset_result_batch.txt'
Example #14
File: main.py  Project: wljSky/ai_explore
                             FLAGS.num_classes, FLAGS.embedding_dim,
                             len(data_processor.char2idx), FLAGS.hidden_size,
                             FLAGS.learning_rate)
            elif args.model == 'bimpm':
                model = BIMPM()
            elif args.model == 'abcnn':
                model = ABCnn()
        elif args.task == "chatbot":
            if args.model == "seq2seq_att":
                word2inx = data_processor.char2idx
                word2inx['<GO>'] = len(word2inx) + 1
                word2inx['<EOS>'] = len(word2inx) + 1
                model = Seq2SeqWithAtt(
                    FLAGS.max_len,
                    len(word2inx),  # FLAGS.vocab_size,
                    word2inx,  # FLAGS.word2inx,
                    FLAGS.embedding_dim,
                    FLAGS.state_size,
                    FLAGS.num_layers,
                    FLAGS.use_attention,
                    FLAGS.use_teacher_forcing,
                    FLAGS.learning_rate,
                    FLAGS.beam_width)

        if args.mode == "train":
            trainer = Train(model, FLAGS, sess_config, field_len=field_len)
            trainer.train(sess, train_data, eval_data, test_data)
        elif args.mode == "eval":
            inferor = Infer(model, FLAGS, sess)
            inferor.infer(eval_data, field_len=field_len)