Пример #1
0
def main():
    """Entry point: parse CLI arguments, load model weights, run evaluation."""
    # Parse command-line options.
    cli_args = _setup_parser().parse_args()

    # Build the dataset and restore the trained model from disk.
    dataset = Data()
    model.load(cli_args.load_path)

    evaluate(dataset)
Пример #2
0
def detector(model_fname, in_fname, out_fname=None):
    """Run a sliding-window detector over an image and save an annotated copy.

    Args:
        model_fname: Path to the trained model weights (passed to model.load).
        in_fname: Path of the input image.
        out_fname: Optional output path; defaults to '<in_fname>_detection.png'.
    """
    # Load trained model
    model.load(model_fname)

    # Read input image data (keep only the RGB channels)
    im = Image.open(in_fname)
    arr = np.array(im)[:, :, 0:3]
    shape = arr.shape

    # Set output fname
    if not out_fname:
        out_fname = os.path.splitext(in_fname)[0] + '_detection.png'

    # Create detection variables
    detections = np.zeros((shape[0], shape[1]), dtype='uint8')
    output = np.copy(arr)

    # Sliding window parameters
    step = 2
    win = 20

    # Loop through pixel positions
    print('Processing...')
    for i in range(0, shape[0] - win, step):
        print('row %1.0f of %1.0f' % (i, (shape[0] - win - 1)))

        for j in range(0, shape[1] - win, step):

            # Extract sub chip
            chip = arr[i:i + win, j:j + win, :]

            # Predict chip label (pixel values scaled to [0, 1])
            prediction = model.predict_label([chip / 255.])[0][0]

            # Record positive detections at the window center
            if prediction == 1:
                detections[i + win // 2, j + win // 2] = 1

    # Process detection locations: merge nearby hits into connected components
    dilation = ndimage.binary_dilation(detections, structure=np.ones((3, 3)))
    labels, n_labels = ndimage.label(dilation)
    center_mass = ndimage.center_of_mass(dilation, labels,
                                         np.arange(n_labels) + 1)

    # center_of_mass returns a bare tuple for a single label; normalize to a
    # list so the loop below always iterates over (row, col) pairs.
    if isinstance(center_mass, tuple):
        center_mass = [center_mass]
    for i, j in center_mass:
        i = int(i - win / 2)
        j = int(j - win / 2)

        # Draw bounding boxes in output array
        output[i:i + win, j:j + 2, 0:3] = [255, 0, 0]
        output[i:i + win, j + win - 2:j + win, 0:3] = [255, 0, 0]
        output[i:i + 2, j:j + win, 0:3] = [255, 0, 0]
        output[i + win - 2:i + win, j:j + win, 0:3] = [255, 0, 0]

    # Save output image
    outIm = Image.fromarray(output)
    outIm.save(out_fname)
Пример #3
0
 def __init__(self, name):
   """Run the test set through a restored checkpoint and cache flattened
   per-filter activations of each layer.

   name: checkpoint basename; weights are read from '../../model/<name>.ckpt'.
   """
   with tf.Session(graph=model.graph) as sess:
     model.load(sess, '../../model/'+name+'.ckpt')
     # Evaluate all intermediate activations on the full test set
     # (keep_prob=1.0 disables dropout).
     h_conv1, h_conv2, h_fc1, y_conv = sess.run(
             (model.h_conv1, model.h_conv2, model.h_fc1, model.y_conv),
             feed_dict={model.x: model.x_test, model.keep_prob: 1.0})
     # Move the channel axis first and flatten batch/spatial axes so each row
     # holds one filter's responses. Assumes 10000 test images at 28x28
     # (14x14 after pooling) — TODO confirm against model.x_test.
     self.h_conv1 = np.transpose(h_conv1, (3, 0, 1, 2)).reshape((32, 10000*28*28))
     self.h_conv2 = np.transpose(h_conv2, (3, 0, 1, 2)).reshape((64, 10000*14*14))
     self.h_fc1 = np.transpose(h_fc1, (1, 0))
     self.y_conv = np.transpose(y_conv, (1, 0))
Пример #4
0
 def __init__(self, name):
   """Load weight tensors from a checkpoint and cache, per unit, the summed
   absolute weight into the following layer.

   name: checkpoint basename; weights are read from '../../model/<name>.ckpt'.
   """
   with tf.Session(graph=model.graph) as sess:
     model.load(sess, '../../model/'+name+'.ckpt')
     W_conv2, W_fc1, W_fc2 = sess.run(
             (model.W_conv2, model.W_fc1, model.W_fc2))
     # Magnitudes only: the sign of a weight is irrelevant here.
     W_conv2 = np.fabs(W_conv2)
     W_fc1 = np.fabs(W_fc1)
     W_fc2 = np.fabs(W_fc2)
     # Reorder so axis 0 indexes the previous layer's units, then flatten
     # the remaining axes for per-unit summation.
     W_conv2 = np.transpose(W_conv2, (2, 0, 1, 3)).reshape((32, 5*5*64))
     W_fc1 = W_fc1.reshape((7, 7, 64, 1024))
     W_fc1 = np.transpose(W_fc1, (2, 0, 1, 3)).reshape((64, 7*7*1024))
     # NOTE(review): each attribute stores sums of the NEXT layer's weights
     # (W_conv1 sums W_conv2, etc.) — looks intentional (outgoing weight per
     # unit) but the naming is confusing; confirm with callers.
     self.W_conv1 = np.sum(W_conv2, axis=1)
     self.W_conv2 = np.sum(W_fc1, axis=1)
     self.W_fc1 = np.sum(W_fc2, axis=1)
Пример #5
0
def significance(network, layer):
    """Return the summed absolute gradient of the cross-entropy loss with
    respect to one layer, evaluated on the test set of a restored checkpoint.

    network: checkpoint basename under '../../model/'.
    layer: attribute name of the target layer on `model`.
    """
    with tf.Session(graph=model.graph) as sess:
        sess.run(tf.global_variables_initializer())
        model.load(sess, '../../model/' + network + '.ckpt')
        target = getattr(model, layer)
        grad_op = tf.gradients(model.cross_entropy, target)
        grad_vals = sess.run(grad_op,
                             feed_dict={
                                 model.x: model.x_test,
                                 model.y: model.y_test,
                                 model.keep_prob: 1.0
                             })
        abs_grad = np.fabs(grad_vals)
        # Reduce over every axis except the last one.
        return np.sum(abs_grad, (0, 1, 2, 3)[:abs_grad.ndim - 1])
Пример #6
0
def predict(args):
    """Punctuate the text in args.test using a trained model and write the
    restored text to args.out.

    Args:
        args: Namespace with .features, .model, .test and .out paths.
    """
    print("Read Features from: ", args.features, file=sys.stderr)
    feat = features.Features(args.features)
    m = model.load(args.model)
    m.summary(150)

    punctuation_vocabulary = data.toDict(data.PUNCTUATION_VOCABULARY)
    reverse_punctuation_vocabulary = {v: k for k, v in punctuation_vocabulary.items()}

    # Read the whole input and close the handle promptly instead of leaking it.
    with io.open(args.test, 'r', encoding='utf-8') as f_in:
        input_text = f_in.read()

    if len(input_text) == 0:
        sys.exit("Input text missing.")

    # Strip existing punctuation tokens and terminate with the END marker.
    text = [w for w in input_text.split() if w not in punctuation_vocabulary] + [data.END]

    # Renamed from the original `predict` lambda, which shadowed this function.
    predict_fn = lambda x: m.predict(x, verbose=0)

    # Ensure the output file is flushed and closed even if restore() raises.
    with open(args.out, 'w', encoding='utf-8') as f_out:
        restore(f_out, text, feat, reverse_punctuation_vocabulary, predict_fn)
Пример #7
0
def train(opt, logging):
    """Jointly train two speaker-embedding models (RNN GE2E + CNN classifier)
    with a shared margin head, validating by EER and checkpointing along the way.

    Args:
        opt: options namespace (paths, hyperparameters, distributed settings).
        logging: logger-like object (NOTE: shadows the stdlib `logging` module
            inside this function).

    Returns:
        model_1 once opt.max_iters is exceeded (main process only).
    """

    ## Data Prepare ##
    if opt.main_proc:
        logging.info("Building dataset")

    train_dataset = DeepSpeakerDataset(opt, os.path.join(opt.dataroot, 'dev'))
    train_loader = DeepSpeakerDataLoader(train_dataset, batch_size=1, num_workers=opt.num_workers, shuffle=True, pin_memory=True)

    val_dataset = DeepSpeakerTestDataset(opt, os.path.join(opt.dataroot, 'test'))
    val_loader = DeepSpeakerTestDataLoader(val_dataset, batch_size=1, num_workers=opt.num_workers, shuffle=False, pin_memory=True)

    opt.in_size = train_dataset.in_size
    opt.out_size = train_dataset.class_nums
    print('opt.in_size {} opt.out_size {}'.format(opt.in_size, opt.out_size))

    if opt.main_proc:
        logging.info("Building dataset Sucessed")

    ##  Building Model ##
    if opt.main_proc:
        logging.info("Building Model")

    opt.model_type = opt.model_type_1
    model_1 = model_select(opt, seq_training=False) ## rnn ge2e
    opt.model_type = opt.model_type_2
    model_2 = model_select(opt, seq_training=False) ## cnn class
    # The margin head consumes the concatenation of both embeddings, so it is
    # built with a temporarily doubled embedding size.
    embedding_size = opt.embedding_size
    opt.embedding_size = 2 * embedding_size
    margin = margin_select(opt)
    opt.embedding_size = embedding_size

    if opt.resume_1:
        model_1, opt.total_iters = load(model_1, opt.resume_1, 'state_dict')
    if opt.resume_2:
        model_2, opt.total_iters = load(model_2, opt.resume_2, 'state_dict')
        margin, opt.total_iters = load(margin, opt.resume_2, 'margin_state_dict')

    # A combined checkpoint (opt.resume) overrides the per-model paths above.
    if opt.resume:
        model_1, opt.total_iters = load(model_1, opt.resume, 'state_dict_1')
        model_2, opt.total_iters = load(model_2, opt.resume, 'state_dict_2')
        margin, opt.total_iters = load(margin, opt.resume, 'margin_state_dict')

    # define optimizers for different layer
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    if opt.optim_type == 'sgd':
        optimizer = optim.SGD([
            {'params': model_1.parameters(), 'weight_decay': 5e-4},
            {'params': model_2.parameters(), 'weight_decay': 5e-4},
            {'params': margin.parameters(), 'weight_decay': 5e-4},
        ], lr=opt.lr, momentum=0.9, nesterov=True)
    elif opt.optim_type == 'adam':
        optimizer = optim.Adam([
            {'params': model_1.parameters(), 'weight_decay': 5e-4},
            {'params': model_2.parameters(), 'weight_decay': 5e-4},
            {'params': margin.parameters(), 'weight_decay': 5e-4},
        ], lr=opt.lr, betas=(opt.beta1, 0.999))

    scheduler = lr_scheduler.StepLR(optimizer=optimizer, step_size=opt.lr_reduce_step, gamma=opt.lr_reduce_factor, last_epoch=-1)

    model_1.to(opt.device)
    model_2.to(opt.device)
    margin.to(opt.device)

    if opt.distributed:
        model_1 = DistributedDataParallel(model_1, device_ids=[opt.local_rank], output_device=opt.local_rank)
        model_2 = DistributedDataParallel(model_2, device_ids=[opt.local_rank], output_device=opt.local_rank)
        margin  = DistributedDataParallel(margin, device_ids=[opt.local_rank], output_device=opt.local_rank)
    if opt.main_proc:
        print(model_1)
        print(model_2)
        print(margin)
        logging.info("Building Model Sucessed")

    # Lower is better for EER, so start from the worst possible value (1.0).
    best_perform_acc = 1.0

    losses = utils.AverageMeter()
    class_losses = utils.AverageMeter()
    embedding_losses = utils.AverageMeter()
    penalty_losses = utils.AverageMeter()

    # Initial performance
    if opt.main_proc:
        EER = union_evaluate(opt, model_1, model_2, val_loader, logging)
        best_perform_acc = EER
        print('>>Start performance: EER = {}<<'.format(best_perform_acc))

    # Keep a handle on the underlying module for its helper methods
    # (normalize/similarity/loss_cal), which DDP wrapping would hide.
    save_model = model_1
    if isinstance(model_1, DistributedDataParallel):
        save_model = model_1.module

    # Start Training
    total_iters = opt.total_iters
    for epoch in range(1, opt.total_epoch + 1):
        # NOTE(review): `while True` restarts the loader indefinitely; the only
        # exit is the `return` on reaching opt.max_iters at the bottom, so the
        # outer epoch loop only ever runs its first iteration — confirm intent.
        while True:
            model_1.train()
            model_2.train()
            margin.train()
            for i, (data) in enumerate(train_loader, start=0):
                if i == len(train_loader):
                    break

                optimizer.zero_grad()

                # Perform forward and Obtain the loss
                feature_input, spk_ids = data
                feature_input = feature_input.to(opt.device)
                label = spk_ids.to(opt.device).squeeze(0)

                output_1, attn_1, w_1, b_1 = model_1(feature_input)
                output_2, attn_2, w_2, b_2 = model_2(feature_input)
                margin_input = torch.cat((output_1, output_2), dim=1)
                margin_output = margin(margin_input, label)

                output_1 = save_model.normalize(output_1)
                sim_matrix_out = save_model.similarity(output_1, w_1, b_1)
                embedding_loss = opt.embedding_loss_lamda / (opt.speaker_num * opt.utter_num) * save_model.loss_cal(sim_matrix_out)
                if opt.att_type == 'multi_attention' and attn_1 is not None:
                    penalty_loss = opt.penalty_loss_lamda * save_model.penalty_loss_cal(attn_1)
                else:
                    penalty_loss = 0
                class_loss = opt.class_loss_lamda * criterion(margin_output, label)
                loss = embedding_loss + penalty_loss + class_loss

                # Reduce losses across workers so logs reflect the global mean.
                loss_dict_reduced = reduce_loss_dict(opt, {'embedding_loss': embedding_loss, 'penalty_loss': penalty_loss, 'class_loss': class_loss})
                losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                loss_value = losses_reduced.item()
                embedding_loss_value = loss_dict_reduced['embedding_loss'].item()
                penalty_loss_value = loss_dict_reduced['penalty_loss'].item()
                class_loss_value = loss_dict_reduced['class_loss'].item()

                # Check the loss and avoid the invaided loss
                # NOTE(review): this catches +/-inf but not NaN — confirm
                # whether NaN losses should also be skipped here.
                inf = float("inf")
                if loss_value == inf or loss_value == -inf:
                    print("WARNING: received an inf loss, setting loss value to 0")
                    loss_value = 0
                    embedding_loss_value = 0
                    penalty_loss_value = 0
                    class_loss_value = 0
                    continue

                # Perform backward and Check and update the grad
                loss.backward()
                if utils.check_grad(model_1.parameters(), opt.clip_grad, opt.ignore_grad) or utils.check_grad(model_2.parameters(), opt.clip_grad, opt.ignore_grad):
                    if opt.main_proc:
                        logging.info('Not a finite gradient or too big, ignoring')
                    optimizer.zero_grad()
                    continue
                optimizer.step()

                total_iters += opt.num_gpus

                # Update the loss for logging
                losses.update(loss_value)
                embedding_losses.update(embedding_loss_value)
                penalty_losses.update(penalty_loss_value)
                class_losses.update(class_loss_value)

                # Print the performance on the training dateset 'opt': opt, 'learning_rate': lr,
                if total_iters % opt.print_freq == 0:
                    scheduler.step(total_iters)
                    if opt.main_proc:
                        lr = scheduler.get_lr()
                        if isinstance(lr, list):
                            lr = max(lr)
                        logging.info('==> Train set steps {} lr: {:.6f}, loss: {:.4f} [ class: {:.4f}, embedding: {:.4f}, penalty_loss {:.4f}]'.format(
                                     total_iters, lr, losses.avg, class_losses.avg, embedding_losses.avg, penalty_losses.avg))

                        if opt.distributed:
                            model_state_dict_1 = model_1.module.state_dict()
                            model_state_dict_2 = model_2.module.state_dict()
                            margin_state_dict = margin.module.state_dict()
                        else:
                            model_state_dict_1 = model_1.state_dict()
                            model_state_dict_2 = model_2.state_dict()
                            margin_state_dict = margin.state_dict()
                        state = {'state_dict_1': model_state_dict_1, 'total_iters': total_iters,
                                 'state_dict_2': model_state_dict_2, 'margin_state_dict': margin_state_dict}
                        filename = 'newest_model.pth'
                        # Back up the previous checkpoint before overwriting it.
                        if os.path.isfile(os.path.join(opt.model_dir, filename)):
                            shutil.copy(os.path.join(opt.model_dir, filename), os.path.join(opt.model_dir, 'newest_model.pth_bak'))
                        utils.save_checkpoint(state, opt.model_dir, filename=filename)

                # Validate the trained model
                if total_iters % opt.validate_freq == 0:
                    EER = union_evaluate(opt, model_1, model_2, val_loader, logging)
                    ##scheduler.step(EER)

                    if opt.main_proc and EER < best_perform_acc:
                        best_perform_acc = EER
                        print("Found better validated model (EER = %.3f), saving to model_best.pth" % (best_perform_acc))

                        if opt.distributed:
                            model_state_dict_1 = model_1.module.state_dict()
                            model_state_dict_2 = model_2.module.state_dict()
                            margin_state_dict = margin.module.state_dict()
                        else:
                            model_state_dict_1 = model_1.state_dict()
                            model_state_dict_2 = model_2.state_dict()
                            margin_state_dict = margin.state_dict()
                        state = {'state_dict_1': model_state_dict_1, 'total_iters': total_iters,
                                 'state_dict_2': model_state_dict_2, 'margin_state_dict': margin_state_dict}

                        filename = 'model_best.pth'
                        if os.path.isfile(os.path.join(opt.model_dir, filename)):
                            shutil.copy(os.path.join(opt.model_dir, filename), os.path.join(opt.model_dir, 'model_best.pth_bak'))
                        utils.save_checkpoint(state, opt.model_dir, filename=filename)

                    # Evaluation switched the models to eval mode; restore
                    # training mode and reset the running meters.
                    model_1.train()
                    model_2.train()
                    margin.train()
                    losses.reset()
                    class_losses.reset()
                    embedding_losses.reset()
                    penalty_losses.reset()

                if total_iters > opt.max_iters and opt.main_proc:
                    logging.info('finish training, steps is  {}'.format(total_iters))
                    return model_1
Пример #8
0
        # Fallback when no GPU is requested/available (enclosing `if` is
        # outside this snippet).
        opt.device = torch.device("cpu")

    ## Data Prepare ##
    print("Building dataset")
    val_dataset = DeepSpeakerUttDataset(opt, opt.dataroot)
    val_loader = DeepSpeakerUttDataLoader(val_dataset,
                                          batch_size=1,
                                          num_workers=opt.num_workers,
                                          shuffle=False)
    opt.in_size = val_dataset.in_size
    print('opt.in_size = {}'.format(opt.in_size))
    print("Building dataset Sucessed")

    ##  Building Model ##
    print("Building Model")
    # A 'class-seq' or 'seq' loss-type prefix selects the sequence-training
    # model variant.
    seq_training = False
    if opt.loss_type.split('_')[0] == 'class-seq' or opt.loss_type.split(
            '_')[0] == 'seq':
        seq_training = True
    model = model_select(opt, seq_training)
    # Evaluation requires restored weights; refuse to run with a fresh model.
    if opt.resume:
        model, opt.total_iters = load(model, opt.resume, 'state_dict')
    else:
        raise Exception('wrong opt.resume {}'.format(opt.resume))
    model.to(opt.device)
    print(model)
    print("Building Model Sucessed")

    ## model testing ##
    evaluate(opt, model, val_loader)
Пример #9
0
def train(opt, logging):
    """Train a speaker-embedding model with a margin-based classification head,
    validating by EER and checkpointing periodically.

    Args:
        opt: options namespace (paths, hyperparameters, distributed settings).
        logging: logger-like object (NOTE: shadows the stdlib `logging` module
            inside this function).
    """

    ## Data Prepare ##
    if opt.main_proc:
        logging.info("Building dataset")

    train_dataset = DeepSpeakerUttDataset(opt, os.path.join(opt.dataroot, 'train'))
    if not opt.distributed:
        train_sampler = BucketingSampler(train_dataset, batch_size=opt.batch_size)
    else:
        train_sampler = DistributedBucketingSampler(train_dataset, batch_size=opt.batch_size,
                                                    num_replicas=opt.num_gpus, rank=opt.local_rank)
    train_loader = DeepSpeakerUttDataLoader(train_dataset, num_workers=opt.num_workers, batch_sampler=train_sampler)

    val_dataset = DeepSpeakerTestDataset(opt, os.path.join(opt.dataroot, 'test'))
    val_loader = DeepSpeakerTestDataLoader(val_dataset, batch_size=1, num_workers=opt.num_workers, shuffle=False, pin_memory=True)

    opt.in_size = train_dataset.in_size
    opt.out_size = train_dataset.class_nums
    print('opt.in_size {} opt.out_size {}'.format(opt.in_size, opt.out_size))

    if opt.main_proc:
        logging.info("Building dataset Sucessed")

    ##  Building Model ##
    if opt.main_proc:
        logging.info("Building Model")

    model = model_select(opt)
    margin = margin_select(opt)

    if opt.resume:
        model, opt.total_iters = load(model, opt.resume, 'state_dict')
        margin, opt.total_iters = load(margin, opt.resume, 'margin_state_dict')

    # define optimizers for different layer
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    if opt.optim_type == 'sgd':
        optimizer = optim.SGD([
            {'params': model.parameters(), 'weight_decay': 5e-4},
            {'params': margin.parameters(), 'weight_decay': 5e-4}
        ], lr=opt.lr, momentum=0.9, nesterov=True)
    elif opt.optim_type == 'adam':
        optimizer = optim.Adam([
            {'params': model.parameters(), 'weight_decay': 5e-4},
            {'params': margin.parameters(), 'weight_decay': 5e-4}
        ], lr=opt.lr, betas=(opt.beta1, 0.999))
    elif opt.optim_type == 'radam':
        optimizer = RAdam([
            {'params': model.parameters(), 'weight_decay': 5e-4},
            {'params': margin.parameters(), 'weight_decay': 5e-4}
        ], lr=opt.lr)

    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20, 40], gamma=0.1)

    model.to(opt.device)
    margin.to(opt.device)

    if opt.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[opt.local_rank],
                                                          output_device=opt.local_rank)
        margin = torch.nn.parallel.DistributedDataParallel(margin, device_ids=[opt.local_rank],
                                                           output_device=opt.local_rank)
    if opt.main_proc:
        print(model)
        print(margin)
        logging.info("Building Model Sucessed")

    # Lower is better for EER, so start from the worst possible value (1.0).
    best_perform_eer = 1.0

    losses = utils.AverageMeter()
    acc = utils.AverageMeter()

    # Initial performance
    if opt.main_proc:
        EER = evaluate(opt, model, val_loader, logging)
        best_perform_eer = EER
        print('>>Start performance: EER = {}<<'.format(best_perform_eer))

    total_iters = opt.total_iters
    for epoch in range(1, opt.total_epoch + 1):
        # Reshuffle buckets each epoch; milestones are epoch-indexed.
        train_sampler.shuffle(epoch)
        scheduler.step()
        # train model
        if opt.main_proc:
            logging.info('Train Epoch: {}/{} ...'.format(epoch, opt.total_epoch))
        model.train()
        margin.train()

        since = time.time()
        for i, (data) in enumerate(train_loader, start=0):
            utt_ids, inputs, targets = data
            inputs, label = inputs.to(opt.device), targets.to(opt.device)
            optimizer.zero_grad()

            raw_logits, attn, w, b = model(inputs)
            output = margin(raw_logits, label)
            #loss = criterion(output, label)
            loss = cal_loss(output, label, criterion, smoothing=opt.smoothing)
            # Reduce across workers so logs reflect the global mean.
            loss_dict_reduced = reduce_loss_dict(opt, {'loss': loss})
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            loss_value = losses_reduced.item()

            # Check the loss and avoid the invaided loss
            # NOTE(review): this catches +/-inf but not NaN — confirm whether
            # NaN losses should also be skipped here.
            inf = float("inf")
            if loss_value == inf or loss_value == -inf:
                print("WARNING: received an inf loss, setting loss value to 0")
                loss_value = 0
                continue

            loss.backward()
            if utils.check_grad(model.parameters(), opt.clip_grad, opt.ignore_grad):
                if opt.main_proc:
                    logging.info('Not a finite gradient or too big, ignoring')
                optimizer.zero_grad()
                continue
            optimizer.step()

            total_iters += opt.num_gpus
            losses.update(loss_value)

            # print train information
            if total_iters % opt.print_freq == 0 and opt.main_proc:
                # current training accuracy
                _, predict = torch.max(output.data, 1)
                total = label.size(0)
                correct = (np.array(predict.cpu()) == np.array(label.data.cpu())).sum()
                time_cur = (time.time() - since) / 100
                since = time.time()
                logging.info("Iters: {:0>6d}/[{:0>2d}], loss: {:.4f} ({:.4f}), train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, epoch, loss_value, losses.avg, correct/total, time_cur, scheduler.get_lr()[0]))

            # save model
            if total_iters % opt.save_freq == 0 and opt.main_proc:
                logging.info('Saving checkpoint: {}'.format(total_iters))
                if opt.distributed:
                    model_state_dict = model.module.state_dict()
                    margin_state_dict = margin.module.state_dict()
                else:
                    model_state_dict = model.state_dict()
                    margin_state_dict = margin.state_dict()
                state = {'state_dict': model_state_dict, 'margin_state_dict': margin_state_dict, 'total_iters': total_iters,}
                filename = 'newest_model.pth'
                # Back up the previous checkpoint before overwriting it.
                if os.path.isfile(os.path.join(opt.model_dir, filename)):
                    shutil.copy(os.path.join(opt.model_dir, filename), os.path.join(opt.model_dir, 'newest_model.pth_bak'))
                utils.save_checkpoint(state, opt.model_dir, filename=filename)

            # Validate the trained model
            if total_iters % opt.validate_freq == 0:
                EER = evaluate(opt, model, val_loader, logging)
                ##scheduler.step(EER)

                if opt.main_proc and EER < best_perform_eer:
                    best_perform_eer = EER
                    logging.info("Found better validated model (EER = %.3f), saving to model_best.pth" % (best_perform_eer))
                    if opt.distributed:
                        model_state_dict = model.module.state_dict()
                        margin_state_dict = margin.module.state_dict()
                    else:
                        model_state_dict = model.state_dict()
                        margin_state_dict = margin.state_dict()
                    state = {'state_dict': model_state_dict, 'margin_state_dict': margin_state_dict, 'total_iters': total_iters,}
                    filename = 'model_best.pth'
                    if os.path.isfile(os.path.join(opt.model_dir, filename)):
                        shutil.copy(os.path.join(opt.model_dir, filename), os.path.join(opt.model_dir, 'model_best.pth_bak'))
                    utils.save_checkpoint(state, opt.model_dir, filename=filename)

                # Evaluation switched the models to eval mode; restore training
                # mode and reset the running meter.
                model.train()
                margin.train()
                losses.reset()
                   
Пример #10
0
import tensorflow as tf
from tensorflow import keras
import model.model as model
import sys

# Export a saved Keras model to the TensorFlow SavedModel format.
# Usage: script.py <in_file> <out_dir>
if len(sys.argv) > 1:
    in_file = sys.argv[1]
else:
    sys.exit("Input file path argument missing")

if len(sys.argv) > 2:
    out_dir = sys.argv[2]
else:
    sys.exit("Output dir argument missing")

print('Loading')
m = model.load(in_file)
m.summary(150)

print('Saving tf')
# NOTE(review): inputsd/outputsd are built but never used below — possibly
# left over from a signature-def based export; confirm before removing.
inputsd = {"in": m.input}
outputsd = {"out": m.output}
# The tensor names are needed by consumers of the exported graph.
print('================ THESE ARE IMPORTANT:===================')
print('Input name  :', m.input.name)
print('Output name :', m.output.name)
print('========================================================')

keras.experimental.export_saved_model(m, out_dir)
print('==============DONE =====================================')
print('========================================================')
Пример #11
0
# CGI endpoint: read a base64 data-URL image from a POST body, run the model,
# and record class probabilities (or an error) in `res` for the JSON response.
try:
    # Get post data
    if os.environ["REQUEST_METHOD"] == "POST":
        data = sys.stdin.read(int(os.environ["CONTENT_LENGTH"]))

        # Convert data url to numpy array (single channel only)
        img_str = re.search(r'base64,(.*)', data).group(1)
        image_bytes = io.BytesIO(base64.b64decode(img_str))
        im = Image.open(image_bytes)
        arr = np.array(im)[:, :, 0:1]

        # Normalize and invert pixel values to [0, 1]
        arr = (255 - arr) / 255.

        # Load trained model
        model.load('cgi-bin/models/model.tfl')

        # Predict class
        predictions = model.predict([arr])[0]

        # Return label data
        res['result'] = 1
        res['data'] = [float(num) for num in predictions]

except Exception as e:
    # Return error data. Broad catch is deliberate: this is the CGI top-level
    # boundary and must always produce a well-formed response.
    res['error'] = str(e)

# Print JSON response
# NOTE(review): only the header and blank line are printed here — the JSON
# body (res) is not emitted in this snippet; presumably it follows elsewhere.
print("Content-type: application/json")
print("")
import flask
import time
import datetime

import sys
import os
import numpy as np
from PIL import Image
from scipy import ndimage
from model import model as tfmodel

version = 'v1'

# SageMaker-style container layout: model artifacts live under /opt/ml/model.
prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')
# Load the model weights once at import time so every request reuses them.
tfmodel.load('/opt/ml/model/model.tfl')

chip = None
data = None

# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.


class ScoringService(object):

    model = None  # Where we keep the model when it's loaded
    prediction = None

    @classmethod
    def get_model(cls):
Пример #13
0
# Select model and output paths from the command line (arguments 3 and 4;
# earlier arguments are handled above this snippet).
if len(sys.argv) > 3:
    m_file = sys.argv[3]
else:
    sys.exit("Model file path argument missing")

if len(sys.argv) > 4:
    output_file = sys.argv[4]
else:
    sys.exit("Output file path argument missing")
############################################################################

print('Loading vocab')
vocab = data.readVocabulary(vocab_file)
print('Loading model')
m = model.load(m_file)
m.summary(150)

punctuation_vocabulary = data.toDict(data.PUNCTUATION_VOCABULARY)

# Inverse maps: id -> word and id -> punctuation token.
reverse_word_vocabulary = {v: k for k, v in vocab.items()}
reverse_punctuation_vocabulary = {
    v: k
    for k, v in punctuation_vocabulary.items()
}

input_text = io.open(in_file, 'r', encoding='utf-8').read()

if len(input_text) == 0:
    sys.exit("Input text missing.")