Beispiel #1
0
def test_receive_images():
    """Images delivered by the view are handed to queued callbacks in FIFO order."""
    results = []
    images = [[0]*4, [1]*4, [2]*4]

    face = Face()
    view = View()
    model = Model(view)
    callback = lambda img: results.append(img)

    # Two pending requests share the same callback.
    model.request_image(face, callback)
    model.request_image(face, callback)

    view.image = images[0]
    view.callback()
    assert len(results) == 1
    assert allclose(results[0], images[0])

    view.image = images[1]
    view.callback()
    assert len(results) == 2
    assert allclose(results[1], images[1])

    # BUG FIX: `final_callback` was defined but never registered — the original
    # passed `callback` again, so the last assertion only held through numpy
    # broadcasting. Register the wrapping callback so results[2] == [images[2]].
    final_callback = lambda img: results.append([img])
    model.request_image(face, final_callback)
    view.image = images[2]
    view.callback()
    assert len(results) == 3
    assert allclose(results[2], [images[2]])
Beispiel #2
0
def test_face_setter():
    """Assigning a face on the model propagates the same object to the view."""
    face = Face()
    view = View()
    model = Model(view)

    model.face = face

    assert model.face is face
    assert view.face is face
Beispiel #3
0
def test_request_image():
    """Requesting an image must not invoke the callback synchronously."""
    collected = []
    model = Model(View())

    model.request_image(Face(), lambda img: collected.append(img))

    assert collected == []
Beispiel #4
0
def test_receive_image():
    """The view callback forwards the current view image to the registered callback."""
    received = []
    view = View()
    model = Model(view)

    model.request_image(Face(), lambda img: received.append(img))
    view.image = [1, 2, 3, 4]
    view.callback()

    assert allclose(received[0], view.image)
Beispiel #5
0
def setup_application(configuration_filename=None):
    """Initialise MFM, wire up model/view/input, and start the model.

    Args:
        configuration_filename: optional path to a fitting configuration; when
            given, the model is started with the fitter chain built from it.
    """
    MFM.init()
    view = View((500, 500))
    model = Model(view)
    ModelInput(model)

    # Guard clause: no configuration means a plain start.
    if configuration_filename is None:
        model.start()
        return
    model.start(construct_chain(configuration_filename, model))
Beispiel #6
0
def test_request_images():
    """Queuing several image requests must not fire any callback by itself."""
    results = []
    # Removed the unused `images` fixture list — nothing in this test reads it.

    face = Face()
    view = View()
    model = Model(view)
    callback = lambda img: results.append(img)

    model.request_image(face, callback)
    model.request_image(face, callback)
    # Nothing is delivered until the view invokes its callback.
    assert len(results) == 0
Beispiel #7
0
def make_model(num_i=4, num_o=4):
    """Build a fresh, empty Model.

    Args:
        num_i: int, number of input units.
        num_o: int, number of output units.

    Returns:
        A newly constructed Model instance.
    """
    return Model(num_i, num_o)
Beispiel #8
0
def main():
    """Run k-fold cross-validation training and log the mean loss/accuracy."""
    data_generator = load_data()
    _history = []  # per-fold [val_loss, val_acc]
    fold_index = 0

    for TEXT, LABEL, train_data, val_data in data_generator.get_fold_data(num_folds=args['num_folds']):
        logger.info("***** Running Training *****")
        logger.info(f"Now fold: {fold_index + 1} / {args['num_folds']}")

        TEXT.build_vocab(train_data, max_size=25000, vectors="glove.6B.300d")
        logger.info(f'Embedding size: {TEXT.vocab.vectors.size()}.')
        LABEL.build_vocab(train_data) # For converting str into float labels.

        # A fresh model per fold so folds do not leak weights into each other.
        model = Model(len(TEXT.vocab), args['embedding_dim'], args['hidden_dim'],
            args['output_dim'], args['num_layers'], args['dropout'], TEXT.vocab.vectors, args["embedding_trainable"])

        optimizer = optim.Adam(model.parameters())
        criterion = nn.BCEWithLogitsLoss()

        # Select the device once; the original duplicated the .to(device)
        # calls in both branches.
        if args['gpu'] is True and args['gpu_number'] is not None:
            torch.cuda.set_device(args['gpu_number'])
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        model = model.to(device)
        criterion = criterion.to(device)

        train_iterator = data.Iterator(train_data, batch_size=args['batch_size'], sort_key=lambda x: len(x.text), device=device)
        val_iterator = data.Iterator(val_data, batch_size=args['batch_size'], sort_key=lambda x: len(x.text), device=device)

        for epoch in range(args['epochs']):
            train_loss, train_acc = train_run(model, train_iterator, optimizer, criterion)
            logger.info(f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
        # Validate once per fold, after all training epochs.
        val_loss, val_acc = eval_run(model, val_iterator, criterion)
        logger.info(f'Val. Loss: {val_loss:.3f} | Val. Acc: {val_acc*100:.2f}% |')

        _history.append([val_loss, val_acc])
        fold_index += 1

    # Average the validation metrics across folds.
    _history = np.asarray(_history)
    loss = np.mean(_history[:, 0])
    acc = np.mean(_history[:, 1])

    logger.info('***** Cross Validation Result *****')
    logger.info(f'LOSS: {loss}, ACC: {acc}')
Beispiel #9
0
def make_model_for_env(env_id):
    """Create an empty Model sized for a gym environment.

    Args:
        env_id: str, id of the environment.

    Returns:
        Model object whose input/output sizes match the environment.
    """
    env = gym.make(env_id)
    inputs = env.observation_space.shape[0]
    action_space = env.action_space
    if isinstance(action_space, gym.spaces.discrete.Discrete):
        outputs = action_space.n
    else:
        outputs = action_space.shape[0]
    return Model(inputs, outputs)
def test():
    """Evaluate a saved model checkpoint on the test split and log its metrics."""
    logger.info("***** Setup *****")
    logger.info(f"Configs: {args}")

    # make iterators
    data_proceessor = DataProcessor()
    test_data = data_proceessor.get_test_data(args['data_ratio'])
    test_iterator = DataLoader(test_data,
                               batch_size=args["batch_size"],
                               shuffle=True)

    # build a model
    model = Model(input_dim=28 * 28,
                  hidden_dim=args['hidden_dim'],
                  drop_rate=args['drop_rate'])

    # load weights (the unused model.state_dict() snapshot was removed —
    # load_state_dict needs only the checkpoint dict)
    weights_dict = torch.load(args["weight_name"])
    model.load_state_dict(weights_dict)

    # loss used by test_run for reporting; no optimizer is needed at test time
    criterion = nn.BCEWithLogitsLoss()

    # for gpu environment: choose the device once, then move both modules
    # (the original duplicated the .to(device) calls in both branches)
    if args['gpu'] is True and args['gpu_number'] is not None:
        torch.cuda.set_device(args['gpu_number'])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = model.to(device)
    criterion = criterion.to(device)

    logger.info(f"Number of testing samples: {len(test_iterator.dataset)}")

    logger.info("***** Testing *****")
    _, test_acc, test_auc, test_ap, test_eer, test_prec, test_rec, test_f1 = test_run(
        model, test_iterator, criterion, device)
    logger.info(
        f'| Test Accuracy: {test_acc:.3f} | Test AUC: {test_auc:.3f} | Test AP: {test_ap:.3f} | Test EER: {test_eer:.3f} | Test Precision: {test_prec:.3f} |  Test Recall: {test_rec:.3f} | Test F1: {test_f1:.3f} |'
    )
def main(file_name):
    """Run handwriting inference on FilePaths.fnInfer, writing text to <file_name>.txt.

    Args:
        file_name: output filename stem; results go to `file_name + '.txt'`.
    """
    # BUG FIX: read the character list via a context manager instead of
    # leaking the file handle from a bare open(...).read().
    with open(FilePaths.fnCharList) as char_file:
        char_list = char_file.read()
    decoder_type = Model.DecoderType.BestPath
    model = Model.Model(char_list, decoder_type, mustRestore=True)

    # Clear leftover intermediate images from a previous run.
    for stale in glob.glob("new1/*.png"):
        os.remove(stale)

    # BUG FIX: the output file is now closed even if inference raises.
    with open(file_name + '.txt', 'w') as f:
        for i in FilePaths.fnInfer:
            # NOTE(review): i[-5] == 'e' looks like a filename-based marker for
            # "end of line" — confirm against how FilePaths.fnInfer is built.
            if i[-5] == 'e':
                f.write('\n')
            else:
                print("processing ", i)
                img = preprocessimg(i)
                implt(img)
                cv2.imwrite("out123.png", img)
                FilePaths.index = FilePaths.index + 1
                infer(model, "out123.png", f)
Beispiel #12
0
def main(_):
    """Train the language model on `context_of_idx`, checkpointing periodically."""
    train_data = context_of_idx

    with tf.Graph().as_default(), tf.Session(config=config_tf) as session:
        # Uniform weight init in [-init_scale, init_scale], applied to every
        # variable created under the "model" scope below.
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = Model.Model(is_training=True, config=config)

        tf.global_variables_initializer().run()

        model_saver = tf.train.Saver(tf.global_variables())

        for i in range(config.iteration):
            print("Training Epoch: %d ..." % (i + 1))
            train_perplexity = run_epoch(session, m, train_data, m.train_op)
            print("Epoch: %d Train Perplexity: %.3f" %
                  (i + 1, train_perplexity))

            # Checkpoint every `save_freq` epochs, suffixing the epoch number.
            if (i + 1) % config.save_freq == 0:
                print('model saving ...')
                model_saver.save(session, config.model_path + '-%d' % (i + 1))
                print('Done!')
Beispiel #13
0
def model():
    # Pytest-style generator fixture: yields a Model wired to a fresh View
    # with a Face already assigned.
    view = View()
    face = Face()
    model = Model(view)
    model.face = face
    yield model
    # NOTE(review): everything below runs as fixture teardown after the test
    # finishes. This Streamlit canvas setup looks like a paste artifact from
    # an unrelated script — confirm it is intentional before relying on it.
    st.markdown("Draw a digit and then press 'Get prediction'")

    # Create a canvas component
    canvas_result = st_canvas(
        stroke_width=20,
        stroke_color="#fff",
        background_color="#000",
        update_streamlit=True,
        height=280,
        width=280,
        drawing_mode="freedraw",
        key="canvas",
    )

if st.button('Get prediction'):
    model = Model()

    # Instantiate an Image object from the handwritten canvas
    image = Image(canvas_result.image_data)

    with col2:
        # Display a h2 title
        st.subheader("What the computer see")
        st.markdown("Your drawing is resized and gray-scaled")

        # Display the transformed image
        if image.array is not None:
            st.image(image.get_streamlit_displayable(), width=280)

    # Check if the user has written something
    if (image.array is not None) and (not image.is_empty()):
Beispiel #15
0
        },
        'Normalize': {}
    },
    # training params
    'gpus': 1,
    'precision': 16,
    'max_epochs': 10,
    'val_batches': 1.0,
    'es_start_from': 3,
    'patience': 3
}

dm = DataModule(file='data_extra' if config['extra_data'] else 'data_old',
                **config)

model = Model(config)

wandb_logger = WandbLogger(project="cassava", config=config)

es = MyEarlyStopping(monitor='val_acc',
                     mode='max',
                     patience=config['patience'])
checkpoint = ModelCheckpoint(
    dirpath='./',
    filename=f'{config["backbone"]}-{config["size"]}-{{val_acc:.5f}}',
    save_top_k=1,
    monitor='val_acc',
    mode='max')
lr_monitor = LearningRateMonitor(logging_interval='step')

trainer = pl.Trainer(gpus=config['gpus'],
def main(_):
    """Generate text from the trained model, greedily or via beam search.

    Seeds the network with `start_sentence`, then emits `len_of_generation`
    characters either one-at-a-time (sampling or argmax, when `is_beams` is
    false) or by keeping the `beam_size` most probable continuations.
    """
    with tf.Graph().as_default(), tf.Session(config=config_tf) as session:
        # Generation feeds the model one character at a time.
        config.batch_size = 1
        config.num_steps = 1

        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            mtest = Model.Model(is_training=False, config=config)

        # tf.global_variables_initializer().run()

        # Weights come entirely from the checkpoint; no init pass is needed.
        model_saver = tf.train.Saver()
        print('model loading ...')
        model_saver.restore(session,
                            config.model_path + '-%d' % config.save_time)
        print('Done!')

        if not is_beams:
            # sentence state
            # Feed the seed sentence character by character to warm the state.
            char_list = list(start_sentence)
            print(char_list)
            start_idx = char_to_idx[char_list[0]]
            print(start_idx)
            # _state = mtest.initial_state.eval()
            _state = tf.get_default_session().run(mtest.initial_state)
            test_data = np.int32([start_idx])
            print(test_data)
            prob, _state = run_epoch(session, mtest, test_data, tf.no_op(),
                                     _state)
            # print(prob)
            # print(_state)
            gen_res = [char_list[0]]
            for i in range(1, len(char_list)):
                char = char_list[i]
                try:
                    char_index = char_to_idx[char]
                except KeyError:
                    # Out-of-vocabulary seed char: substitute the most likely one.
                    char_index = np.argmax(prob.reshape(-1))
                prob, _state = run_epoch(session,
                                         mtest, np.int32([char_index]),
                                         tf.no_op(), _state)
                gen_res.append(char)
            print(gen_res)
            # gen text
            # First generated character after the seed.
            if is_sample:
                gen = np.random.choice(config.vocab_size,
                                       1,
                                       p=prob.reshape(-1))
                gen = gen[0]
            else:
                gen = np.argmax(prob.reshape(-1))
            test_data = np.int32(gen)
            gen_res.append(idx_to_char[gen])
            # Remaining characters: feed the last output back in each step.
            for i in range(len_of_generation - 1):
                prob, _state = run_epoch(session, mtest, test_data, tf.no_op(),
                                         _state)
                if is_sample:
                    gen = np.random.choice(config.vocab_size,
                                           1,
                                           p=prob.reshape(-1))
                    gen = gen[0]
                else:
                    gen = np.argmax(prob.reshape(-1))
                test_data = np.int32(gen)
                gen_res.append(idx_to_char[gen])
            print('Generated Result: ', ''.join(gen_res))
        else:
            # sentence state
            # Beams are tuples: (log-prob score, chars so far, last index, state).
            char_list = list(start_sentence)
            start_idx = char_to_idx[char_list[0]]
            # _state = mtest.initial_state.eval()
            _state = tf.get_default_session().run(mtest.initial_state)
            beams = [(0.0, [idx_to_char[start_idx]], idx_to_char[start_idx])]
            test_data = np.int32([start_idx])
            prob, _state = run_epoch(session, mtest, test_data, tf.no_op(),
                                     _state)
            # 1e-20 guards against log(0) on zero-probability entries.
            y1 = np.log(1e-20 + prob.reshape(-1))
            beams = [(beams[0][0], beams[0][1], beams[0][2], _state)]
            # Warm the single beam with the rest of the seed sentence.
            for i in range(1, len(char_list)):
                char = char_list[i]
                try:
                    char_index = char_to_idx[char]
                except KeyError:
                    top_indices = np.argsort(-y1)
                    char_index = top_indices[0]
                prob, _state = run_epoch(session,
                                         mtest, np.int32([char_index]),
                                         tf.no_op(), beams[0][3])
                y1 = np.log(1e-20 + prob.reshape(-1))
                beams = [(beams[0][0], beams[0][1] + [char], char_index,
                          _state)]
            # gen text
            # Expand the first generation step into beam_size candidates.
            if is_sample:
                top_indices = np.random.choice(config.vocab_size,
                                               beam_size,
                                               replace=False,
                                               p=prob.reshape(-1))
            else:
                top_indices = np.argsort(-y1)
            b = beams[0]
            beam_candidates = []
            for i in range(beam_size):
                wordix = top_indices[i]
                beam_candidates.append(
                    (b[0] + y1[wordix], b[1] + [idx_to_char[wordix]], wordix,
                     _state))
            beam_candidates.sort(key=lambda x: x[0],
                                 reverse=True)  # decreasing order
            beams = beam_candidates[:beam_size]  # truncate to get new beams
            # Each step: expand every beam, keep the best beam_size overall.
            for xy in range(len_of_generation - 1):
                beam_candidates = []
                for b in beams:
                    test_data = np.int32(b[2])
                    prob, _state = run_epoch(session, mtest, test_data,
                                             tf.no_op(), b[3])
                    y1 = np.log(1e-20 + prob.reshape(-1))
                    if is_sample:
                        top_indices = np.random.choice(config.vocab_size,
                                                       beam_size,
                                                       replace=False,
                                                       p=prob.reshape(-1))
                    else:
                        top_indices = np.argsort(-y1)
                    for i in range(beam_size):
                        wordix = top_indices[i]
                        beam_candidates.append(
                            (b[0] + y1[wordix], b[1] + [idx_to_char[wordix]],
                             wordix, _state))
                beam_candidates.sort(key=lambda x: x[0],
                                     reverse=True)  # decreasing order
                beams = beam_candidates[:
                                        beam_size]  # truncate to get new beams

            # The best beam's character list is the generated text.
            print('Generated Result: ', ''.join(beams[0][1]))
Beispiel #17
0
def test_start():
    """start() accepts both None and a fitter class without raising."""
    for fitter in (None, ModelFitter):
        Model(View()).start(fitter)
Beispiel #18
0
def test_close():
    """A started model can be closed cleanly."""
    instance = Model(View())
    instance.start()
    instance.close()
Beispiel #19
0
# Load the fitting configuration (fitters + initial face parameters) from
# the JSON file given on the command line.
fitting_settings = None
with open(args.config) as config:
    fitting_settings = json.load(config)

fitters = fitting_settings['fitters']

face_parameters = fitting_settings['input'].get('initial_face', {})

# Each initial-face field is optional; fall back to neutral defaults.
coefficients = face_parameters.get('coefficients', [])
directed_light = face_parameters.get('directed_light', (0., 0., 0.))
ambient_light = face_parameters.get('ambient_light', 0.)
initial_face = Face(coefficients=coefficients,
                    directed_light=directed_light,
                    ambient_light=ambient_light)

# Load the target image as grayscale floats in [0, 1], flipped vertically
# and flattened to match the renderer's pixel order.
model_filename = get_datafile_path(fitting_settings['input']['input_image'])
image = Image.open(model_filename).convert('L')
original_data = array(image.getdata()).astype('f') / 255
image_data = original_data.reshape(image.size)[::-1, :].flatten()
image.close()

MFM.init()
view = View((500, 500))
model = Model(view)
model_input = ModelInput(model)

chain = FittersChain(fitters, image_data, model, initial_face=initial_face)

model.start(chain)
Beispiel #20
0
def test_redraw():
    """redraw() works both without and with an explicit callback."""
    instance = Model(View())
    instance.start(None)
    instance.redraw()
    instance.redraw(lambda: None)
Beispiel #21
0
def test_constructor():
    """Constructing Model around a View yields a Model instance."""
    instance = Model(View())
    assert isinstance(instance, Model)
def train():
    """Train with early stopping, plot the loss curves, then evaluate on validation."""
    logger.info("***** Setup *****")
    logger.info(f"Configs: {args}")

    # make iterators
    data_proceessor = DataProcessor()
    train_data, val_data, pos_weight = data_proceessor.get_data(
        args['split_rate'], args['data_ratio'], args['seed'])
    train_iterator = DataLoader(train_data,
                                batch_size=args["batch_size"],
                                shuffle=True)
    val_iterator = DataLoader(val_data,
                              batch_size=args["batch_size"],
                              shuffle=True)

    # build a model
    model = Model(input_dim=28 * 28,
                  hidden_dim=args['hidden_dim'],
                  drop_rate=args['drop_rate'])

    # define an optimizer
    optimizer = optim.Adam(model.parameters())
    if args['loss_correction'] is True:
        # Re-weight the positive class to correct for label imbalance.
        pos_weight = torch.tensor(pos_weight)
        criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    else:
        criterion = nn.BCEWithLogitsLoss()

    # additional settings (e.g., early stopping)
    early_stopping = EarlyStopping(logger,
                                   patience=args['patience'],
                                   verbose=True)

    # for gpu environment: choose the device once, then move both modules
    # (the original duplicated the .to(device) calls in both branches)
    if args['gpu'] is True and args['gpu_number'] is not None:
        torch.cuda.set_device(args['gpu_number'])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = model.to(device)
    criterion = criterion.to(device)

    logger.info(f"Number of training samples: {len(train_iterator.dataset)}")
    logger.info(f"Number of validation samples: {len(val_iterator.dataset)}")

    logger.info("***** Training *****")
    _history = []  # per-epoch [train_loss, train_acc, val_loss, val_acc]
    for epoch in range(args['epochs']):
        train_loss, train_acc = train_run(model, train_iterator, optimizer,
                                          criterion, device)
        logger.info(
            f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.3f}% |'
        )
        val_loss, val_acc = eval_run(model, val_iterator, criterion, device)
        logger.info(
            f'| Val. Loss: {val_loss:.3f} | Val. Acc: {val_acc*100:.3f}% |')
        _history.append([train_loss, train_acc, val_loss, val_acc])

        # early stopping
        early_stopping(val_loss)
        if early_stopping.early_stop:
            logger.info(f'\tEarly stopping at {epoch+1:02}')
            if args['save_model'] is True:
                save_model(model)
            break

    else:  # for/else: runs only when training completed without early stop
        if args['save_model'] is True:
            save_model(model)

    logger.info("***** Evaluation *****")
    # plot loss (columns 0 and 2 are train and validation loss)
    _history = np.array(_history)
    plt.figure()
    plt.plot(np.arange(len(_history)), _history[:, 0], label="train")
    plt.plot(np.arange(len(_history)), _history[:, 2], label='validation')
    plt.grid(True)
    plt.legend()
    plt.title("Training Monitoring")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.savefig('./fig/{}/loss.png'.format(run_start_time),
                bbox_inches="tight",
                pad_inches=0.1)

    # draw figures for evaluation
    _, _, val_auc, val_ap, val_eer, val_prec, val_rec, val_f1 = test_run(
        model, val_iterator, criterion, device)
    logger.info(
        f'| Val. AUC: {val_auc:.3f} | Val. AP: {val_ap:.3f} | Val. EER: {val_eer:.3f} | Val. Precision: {val_prec:.3f} |  Val. Recall: {val_rec:.3f} | Val. F1: {val_f1:.3f} |'
    )
def main(_):
    """Enumerate high-probability passwords from the trained character LSTM.

    Performs a best-first search over prefixes: each prefix popped from the
    queue is extended by its most probable next characters; completed
    passwords are written to rotating dictionary files (1M passwords per
    file, 5M total).
    """
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default() as g:
        with tf.Session(config=config_tf, graph=g) as session:
            # batch_size/num_steps of 1: the model is fed one char at a time.
            config.batch_size = 1
            config.num_steps = 1

            initializer = tf.random_uniform_initializer(
                -config.init_scale, config.init_scale)
            with tf.variable_scope("model",
                                   reuse=None,
                                   initializer=initializer):
                mtest = Model.Model(is_training=False, config=config)

            # Weights come from the checkpoint; no variable init is needed.
            start = datetime.datetime.now()
            model_saver = tf.train.Saver()
            print('model loading ...')
            model_saver.restore(
                session, config.model_path + '_' +
                data_dir[data_dir.rfind('/') + 1:-4] + '_' +
                str(config.num_layers) + '_' + str(config.hidden_size))
            print('Done!')
            print(datetime.datetime.now() - start)

            # Seed the search with first characters, most probable first.
            sorted_first_prob = sorted(first_prob.items(),
                                       key=lambda x: x[1],
                                       reverse=True)
            prefixes = Queue()
            LUT = {}  # prefix -> cumulative probability
            for (key, value) in sorted_first_prob:
                prefixes.put(key)
                LUT[key] = value

            # Output directory named after the data file and model shape.
            generate_path = config.dictionary_path + data_dir[data_dir.rfind('/') + 1:-4] + '_' + \
                            str(config.num_layers) + '_' + str(config.hidden_size) + '/'
            if not os.path.exists(generate_path):
                os.mkdir(generate_path)

            write_star = datetime.datetime.now()
            f_id = 1
            f = open(generate_path + str(f_id) + '.txt', 'w', encoding='utf-8')

            code_num = 0
            end_flag = False

            def _emit(password):
                """Write one finished password; rotate files every 1M entries.

                Returns True when the 5M generation budget is exhausted.
                """
                nonlocal f, f_id, code_num, write_star
                f.write(password + '\n')
                code_num += 1
                if code_num == 5000000:
                    return True
                if code_num % 1000000 == 0:
                    print('{}.txt耗时:{}'.format(
                        f_id,
                        datetime.datetime.now() - write_star))
                    write_star = datetime.datetime.now()
                    f_id += 1
                    # BUG FIX: close the previous file before rotating, and
                    # keep writing inside generate_path (the original reopened
                    # under config.dictionary_path, a different directory).
                    f.close()
                    f = open(generate_path + str(f_id) + '.txt',
                             'w',
                             encoding='utf-8')
                return False

            while not prefixes.empty():
                current_prefix = prefixes.get()
                current_prob = LUT.pop(current_prefix)

                # Next-character probability distribution for this prefix.
                probability = get_pro(session, mtest, current_prefix)

                # Order candidate next characters by descending probability.
                next_que = PriorityQueue()
                for idx in range(len(probability)):
                    next_que.put(next_entry(idx_to_char[idx], probability[idx]))

                # Expand only the top sixth of the alphabet per prefix.
                for _ in range(len(probability) // 6):
                    get_next = next_que.get()
                    next_char = get_next.char
                    next_prob = get_next.prob
                    if next_char == "⊥":  # end-of-password symbol
                        if len(current_prefix) >= 4:
                            # BUG FIX: format arguments were in the wrong
                            # order for "第{}条口令为{},概率为{}"
                            # (index, password, probability).
                            print('第{}条口令为{},概率为{}'.format(
                                code_num + 1, current_prefix, current_prob))
                        if _emit(current_prefix):
                            end_flag = True
                            break
                    else:
                        new_prefix = current_prefix + next_char
                        if len(new_prefix) == 17:
                            # Length cap reached: flush the prefix as-is.
                            print('第{}条口令为{},概率为{}'.format(
                                code_num + 1, current_prefix, current_prob))
                            if _emit(current_prefix):
                                end_flag = True
                                break
                        else:
                            # Keep searching only while above the threshold.
                            new_prob = current_prob * next_prob
                            if new_prob > threshold:
                                LUT[new_prefix] = new_prob
                                prefixes.put(new_prefix)

                if end_flag:
                    break

            # BUG FIX: close the last output file before exiting.
            f.close()
            print('completed')
Beispiel #24
0
import json
import logging

import pandas as pd
from flask import Flask, request
from flask_cors import cross_origin

from src import Model

# Module-wide logger for the prediction controller.
logging.basicConfig(format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger = logging.getLogger('Controller')
logger.setLevel(logging.INFO)

app = Flask(__name__)

# Single shared model instance, created once at import time.
model = Model.Model()


@app.route('/predict', methods=['POST'])
@cross_origin(origin='*')
def predict():
    try:
        raw = request.json
        logger.info("Incoming request", raw)
        series = pd.Series(raw, raw.keys())
        res = model.predict(series)
        print(res)
        ans = res.tolist()
    except Exception as e:
        logger.error(e)
        ans = str(e)