Example no. 1
def restore_evaluate(self, args):
    # switch hparams to evaluation mode and disable dropout
    args.set_hparam('run_mode', ModeKeys.EVAL.value)
    args.set_hparam('dropout_keep_prob', 1.0)
    graph = tf.Graph()
    with graph.as_default():
        model = build_model(args, False)
        model.restore()
        return model.evaluate()
Example no. 2
def train(args):
    # force run_mode to TRAIN if the hparam is present
    if 'run_mode' in args:
        args.set_hparam('run_mode', ModeKeys.TRAIN.value)
    model = build_model(args)
    try:
        model.restore(use_ema=False, use_partial_loader=False)
        model.reset()  # for continuous training, we reset some layers to random if necessary
    except (NotFoundError, InvalidArgumentError) as e:
        logger.debug(e)
        logger.info('no available model, will train from scratch!')

    model.train()
Example no. 3
def demo(args):
    args.is_serving = True  # set it to true to ignore data set loading
    model = build_model(args)
    model.restore()
    sample_context = ''
    sample_questions = [
        'What was Maria Curie the first female recipient of?',
        'What year was Casimir Pulaski born in Warsaw?',
        'Who was one of the most famous people born in Warsaw?',
        'Who was Frédéric Chopin?',
        'How old was Chopin when he moved to Warsaw with his family?'
    ]
    sample_answers = [
        'Nobel Prize', '1745', 'Maria Skłodowska-Curie', 'Famous musicians',
        'seven months old'
    ]

    for q, g in zip(sample_questions, sample_answers):
        a = model.predict(sample_context, q)  # real work is here!
        logger.info('QUESTION: %s' % q)
        logger.info('ANSWER: %s <- GOLD: %s' % (a, g))
Example no. 4
# Test Data
dataset_test = MyDataset(ann_directory, img_directory, mode='test')
loader_test = DataLoader(dataset_test,
                         batch_size=batch_size,
                         shuffle=True,
                         collate_fn=helper.collate_fn)

# ----------------------------------------------- Set Up the Model -----------------------------------------------------

# Setting up GPU device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Number of classes: background, with_mask, mask_weared_incorrect, without_mask; build the model (Faster R-CNN)
num_classes = 4
model = helper.build_model(num_classes)
model = model.to(device)

# Load the saved weights (map_location keeps this working on CPU-only machines)
model.load_state_dict(torch.load(PATH, map_location=device))

# ----------------------------------------------- Evaluation & Predictions ---------------------------------------------

# put the model in evaluation mode
model.eval()

# Evaluate the model
evaluate(model, loader_test, device=device)

# Make a prediction on a random test image
n = randint(0, len(dataset_test) - 1)
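The snippet stops after picking the index. A minimal sketch of the prediction step, assuming MyDataset.__getitem__ returns an (image_tensor, target) pair and that helper.build_model returns a torchvision-style detector that accepts a list of image tensors in eval mode:

# Fetch the randomly chosen test image and run the detector on it
img, _ = dataset_test[n]
with torch.no_grad():
    prediction = model([img.to(device)])[0]  # dict with 'boxes', 'labels' and 'scores'
print(prediction['boxes'], prediction['labels'], prediction['scores'])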
Example no. 5
def evaluate(args):
    model = build_model(args)
    model.restore()
    return model.evaluate()