Example #1
def test_cnn(root_path, step_max_devacc, max_devacc):
    dropout = 0.5
    l2_reg = 0.0001
    learning_rate = 0.001
    gpu = 0
    word_lookup_file = './data/word_vectors_pruned_300.txt'
    label_id_file = './data/nlu.label_id.txt'
    test_feature_file = './data_vds_id_test'

    sentence_length = 20
    reg_length = 8
    word_embed = 300
    dh.embedding_size = word_embed
    dh.reg_length = reg_length
    dh.sentence_length = sentence_length
    print('Reading word lookup table...')
    id2vect, word2id, id2word = dh.read_word_lookup_table(word_lookup_file)
    id2vect = np.asarray(id2vect, dtype=np.float32)

    print('Reading label id...')
    label2id, id2label = dh.read_label(label_id_file)

    print('Reading test data...')
    reg2id = {'N/A': 0}
    test_word, test_vds, test_reg, test_y = dh.read_data(test_feature_file, label2id, word2id, reg2id)

    vbs_size = 308
    vocab_size = len(word2id)
    reg_size = len(reg2id)
    num_class = len(label2id)

    cnn = CNN(num_class, id2vect, gpu, l2_reg, dropout, learning_rate, vocab_size, vbs_size, reg_size)
    test_acc = cnn.test(root_path, step_max_devacc, max_devacc, test_word, test_vds, test_reg, test_y)
    print('Test accuracy: %.3f' % test_acc)
Example #2
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-4)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    y_true, y_pred = [], []
    correct, total = 0, 0
    running_loss = []
    for X, y in va_loader:
        with torch.no_grad():
            output = model(X)
            predicted = predictions(output.data)
            y_true.extend(y)
            y_pred.extend(predicted)
            total += y.size(0)
            correct += (predicted == y).sum().item()
            running_loss.append(criterion(output, y).item())
    print("Validation data accuracies:")
    print(confusion_matrix(y_true, y_pred))

    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
Example #3
    def __init__(self, config):
        # get config args
        self.out_dir = config['output_directory']
        self.batch_size = config['batch_size']
        self.input_dims = config['input_dimensions']
        self.learn_rate = config['learning_rate']
        self.z_dim = config['z_dimension']
        self.name = config['model_name']
        self.verbosity = config['verbosity']
        self.num_train = config['number_train']
        self.num_test = config['number_test']

        # initialize logging to a new log file, recording INFO-level and higher events
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p',
                            filename='{}{}.log'.format(self.out_dir,
                                                       self.name),
                            filemode='w',
                            level=logging.INFO)

        # try to get gpu device, if not just use cpu
        self.device = \
            torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        # initialize critic (CNN) model
        self.critic = CNN(in_chan=self.input_dims[-1], out_dim=1, out_act=None)

        # initialize generator (TransposeCNN) model
        self.generator = TransposeCNN(in_dim=self.z_dim,
                                      out_chan=self.input_dims[-1],
                                      out_act=torch.nn.Tanh())

        # initialize zdim dimensional normal distribution to sample generator
        # inputs
        self.z_dist = torch.distributions.normal.Normal(
            torch.zeros(self.batch_size, self.z_dim),
            torch.ones(self.batch_size, self.z_dim))

        # initialize bs dimensional uniform distribution to sample eps vals for
        # creating interpolations
        self.eps_dist = torch.distributions.uniform.Uniform(
            torch.zeros(self.batch_size, 1, 1, 1),
            torch.ones(self.batch_size, 1, 1, 1))

        # sample a batch of z to have constant set of generator inputs
        #  as model trains
        self.z_const = self.z_dist.sample()[:64].to(self.device)

        # initialize critic and generator optimizers
        self.crit_opt = torch.optim.Adam(self.critic.parameters(),
                                         lr=self.learn_rate)
        self.gen_opt = torch.optim.Adam(self.generator.parameters(),
                                        lr=self.learn_rate)

        # move tensors to device
        self.critic.to(self.device)
        self.generator.to(self.device)
Example #4
def build_model(input_tensor, params, is_training, reuse):
    '''Creates the corresponding TensorFlow graph, which must be executed inside a tf.Session().

    :param input_tensor: input data fed to the model
    :param params: model hyperparameters
    :param is_training: whether the graph is built for training
    :param reuse: whether to reuse existing variables

    :return: logits of the model (output distribution)
    '''

    # Feeding the 'inputs' data to the model
    cnn = CNN()
    cnn.build(input_tensor, params, is_training, reuse)
    logits = cnn.logits

    return logits
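
A minimal usage sketch for build_model (the placeholder shape, the params dict, and its fields are illustrative assumptions, not taken from the original source):

import tensorflow as tf

# Hypothetical input placeholder and hyperparameter container (assumed fields).
input_tensor = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='input_x')
params = {'num_classes': 10, 'dropout_rate': 0.5}

# Build the graph once for inference; reuse=False creates fresh variables.
logits = build_model(input_tensor, params, is_training=False, reuse=False)
probabilities = tf.nn.softmax(logits)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # probs = sess.run(probabilities, feed_dict={input_tensor: image_batch})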
Example #5
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    import torch.optim as op
    import torch.nn as nn
    criterion = nn.CrossEntropyLoss()
    optimizer = op.Adam(model.parameters(), lr=config('cnn.learning_rate'))
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
        config('cnn.checkpoint'))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
        stats)
    
    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)
        
        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, epoch+1,
            stats)

        # Save model parameters
        save_checkpoint(model, epoch+1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
Example #6
    def __init__(self, vocab_size, config):
        super(OCR, self).__init__()
        self.cnn = CNN()
        self.config = config

        self.transformer = Seq2Seq(
            vocab_size,
            encoder_hidden=config['seq_parameters']['encoder_hidden'],
            decoder_hidden=config['seq_parameters']['decoder_hidden'],
            img_channel=config['seq_parameters']['img_channel'],
            decoder_embedded=config['seq_parameters']['decoder_embedded'],
            dropout=config['seq_parameters']['dropout'])
Example #7
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, get_semantic_labels = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    params = list(model.conv1.parameters()) + list(
        model.conv2.parameters()) + list(model.conv3.parameters())
    params = params + list(model.fc1.parameters()) + list(
        model.fc2.parameters()) + list(model.fc3.parameters())
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params, lr=0.0001)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    fig, axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    model, _, _ = restore_checkpoint(model, config('cnn.checkpoint'))

    dataset = get_data_by_label(va_loader)
    evaluate_cnn(dataset, model, criterion, get_semantic_labels)

    # Save figure and keep plot open
    utils.save_cnn_training_plot(fig)
    utils.hold_training_plot()
Example #8
    def __init__(self, param, margin=0.5):
        super(MemCnnSim, self).__init__()

        self.param = param

        self.max_grad_norm = param['max_grad_norm']

        # self.memn2n = CNN(param)
        self.memn2n = MemN2N(param)
        self.cnn = CNN(param)

        self.criterion = nn.CosineEmbeddingLoss(margin=margin)
        self.optimizer = optim.Adam(self.parameters(), lr=0.00005)
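
For context, nn.CosineEmbeddingLoss(margin=...) used above compares pairs of embeddings against a target of +1 (similar) or -1 (dissimilar); a small self-contained sketch with made-up shapes:

import torch
import torch.nn as nn

loss_fn = nn.CosineEmbeddingLoss(margin=0.5)
mem_out = torch.randn(4, 128)   # stand-in for MemN2N outputs (illustrative size)
cnn_out = torch.randn(4, 128)   # stand-in for CNN outputs (illustrative size)
target = torch.tensor([1., -1., 1., -1.])  # +1 = similar pair, -1 = dissimilar
print(loss_fn(mem_out, cnn_out, target).item())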
Example #9
    def __init__(self, param):
        super(MemN2N, self).__init__()

        self.param = param

        self.hops = self.param['hops']
        self.vocab_size = param['vocab_size']
        self.embedding_size = param['embedding_size']

        self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
        self.cnn = CNN(param, embedding=self.embedding)
        self.linear = nn.Linear(self.embedding_size, self.embedding_size)

        self.softmax = nn.Softmax()

        self.weights_init()
Example #10
def main():
    # data loaders
    _, va_loader, _, get_semantic_label = get_train_val_test_loaders(
        num_classes=config('autoencoder.num_classes'))
    dataset = get_data_by_label(va_loader)

    model = Autoencoder(config('autoencoder.ae_repr_dim'))
    criterion = torch.nn.MSELoss()

    # Attempts to restore the latest checkpoint if exists
    print('Loading autoencoder...')
    #model, start_epoch, _ = restore_checkpoint(model,
        #config('autoencoder.checkpoint'))
    #evaluate_autoencoder(dataset, get_semantic_label, model, criterion)

    # Evaluate model
    model = CNN()
    model, start_epoch, _ = restore_checkpoint(model, config('cnn.checkpoint'))
    evaluate_autoencoder(dataset, get_semantic_label, model, criterion)
Example #11
    def __init__(self, args, train_dataset, device, input_channel, num_classes):

        # Hyper Parameters
        self.batch_size = 128
        learning_rate = args.lr

        if args.forget_rate is None:
            if args.noise_type == "asymmetric":
                forget_rate = args.noise_rate / 2
            else:
                forget_rate = args.noise_rate
        else:
            forget_rate = args.forget_rate

        self.noise_or_not = train_dataset.noise_or_not

        # Adjust learning rate and betas for Adam Optimizer
        mom1 = 0.9
        mom2 = 0.1
        self.alpha_plan = [learning_rate] * args.n_epoch
        self.beta1_plan = [mom1] * args.n_epoch

        for i in range(args.epoch_decay_start, args.n_epoch):
            self.alpha_plan[i] = float(args.n_epoch - i) / (args.n_epoch - args.epoch_decay_start) * learning_rate
            self.beta1_plan[i] = mom2

        # define drop rate schedule
        self.rate_schedule = np.ones(args.n_epoch) * forget_rate
        self.rate_schedule[:args.num_gradual] = np.linspace(0, forget_rate ** args.exponent, args.num_gradual)

        self.device = device
        self.num_iter_per_epoch = args.num_iter_per_epoch
        self.print_freq = args.print_freq
        self.co_lambda = args.co_lambda
        self.n_epoch = args.n_epoch
        self.train_dataset = train_dataset

        if args.model_type == "cnn":
            if args.dataset == 'mnist':
                self.model1 = CNN(input_channel=input_channel, n_outputs=num_classes, linear_num=144)
                self.model2 = CNN(input_channel=input_channel, n_outputs=num_classes, linear_num=144)
            else:
                self.model1 = CNN(input_channel=input_channel, n_outputs=num_classes)
                self.model2 = CNN(input_channel=input_channel, n_outputs=num_classes)
        elif args.model_type == "mlp":
            self.model1 = MLPNet()
            self.model2 = MLPNet()
        elif args.model_type == 'resnet50':
            self.model1 = resnet50(pretrained=True, num_classes=num_classes)
            self.model2 = resnet50(pretrained=True, num_classes=num_classes)

        self.model1.to(device)
        print(self.model1.parameters)

        self.model2.to(device)
        print(self.model2.parameters)

        if args.optimizer == 'Adam' or args.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(list(self.model1.parameters()) + list(self.model2.parameters()),
                                              lr=learning_rate)
        elif args.optimizer == "SGD" or args.optimizer == 'sgd':
            self.optimizer = torch.optim.SGD(list(self.model1.parameters()) + list(self.model2.parameters()),
                                             lr=learning_rate, momentum=0.9, weight_decay=1e-3)
        else:
            raise NotImplementedError("ERROR: Optimizer {} not been implemented!".format(args.optimizer))

        self.loss_fn = loss_jocor


        self.adjust_lr = args.adjust_lr
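
The rate_schedule above ramps from 0 to forget_rate ** exponent over the first num_gradual epochs and then stays at forget_rate; a standalone sketch of just that schedule with illustrative values (not from the original source):

import numpy as np

n_epoch, forget_rate, num_gradual, exponent = 10, 0.2, 5, 1
rate_schedule = np.ones(n_epoch) * forget_rate
rate_schedule[:num_gradual] = np.linspace(0, forget_rate ** exponent, num_gradual)
print(rate_schedule)  # ramps 0.0 -> 0.2 over the first 5 epochs, then stays at 0.2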
Example #12
    xi = xi.view((1,3,config('image_dim'),config('image_dim')))
    zi = F.relu(model.conv1(xi))
    zi = zi.detach().numpy()[0]
    sort_mask = np.argsort(model.conv1.weight.detach().numpy().mean(axis=(1,2,3)))
    zi = zi[sort_mask]
    fig, axes = plt.subplots(4, 4, figsize=(10,10))
    for i, ax in enumerate(axes.ravel()):
        ax.axis('off')
        im = ax.imshow(zi[i], cmap='gray')
    fig.suptitle('Layer 1 activations, y={}'.format(yi))
    fig.savefig('CNN_viz1_{}.png'.format(yi), dpi=200, bbox_inches='tight')

if __name__ == '__main__':
    # Attempts to restore from checkpoint
    print('Loading cnn...')
    model = CNN()
    model, start_epoch, _ = restore_checkpoint(model, config('cnn.checkpoint'),
        force=True)

    tr_loader, _, _, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Saving input images in original resolution
    metadata = pd.read_csv(config('csv_file'))
    for idx in [0, 4, 14, 15, 21]:
        filename = os.path.join(
            config('image_path'), metadata.loc[idx, 'filename'])
        plt.imshow(imread(filename))
        plt.axis('off')
        plt.savefig('CNN_viz0_{}.png'.format(int(
            metadata.loc[idx, 'numeric_label'])),
Example #13
    def __init__(self, args, train_dataset, device, input_channel,
                 num_classes):

        # Hyper Parameters
        self.batch_size = 128
        learning_rate = args.lr

        if args.forget_rate is None:
            if args.noise_type == "asymmetric":
                forget_rate = args.noise_rate / 2
            else:
                forget_rate = args.noise_rate
        else:
            forget_rate = args.forget_rate

        self.noise_or_not = train_dataset.noise_or_not

        # Adjust learning rate and betas for Adam Optimizer
        mom1 = 0.9
        mom2 = 0.1
        self.alpha_plan = [learning_rate] * args.n_epoch
        self.beta1_plan = [mom1] * args.n_epoch

        for i in range(args.epoch_decay_start, args.n_epoch):
            self.alpha_plan[i] = float(args.n_epoch - i) / (
                args.n_epoch - args.epoch_decay_start) * learning_rate
            self.beta1_plan[i] = mom2

        # define drop rate schedule
        self.rate_schedule = np.ones(args.n_epoch) * forget_rate
        self.rate_schedule[:args.num_gradual] = np.linspace(
            0, forget_rate**args.exponent, args.num_gradual)

        self.device = device
        self.num_iter_per_epoch = args.num_iter_per_epoch
        self.print_freq = args.print_freq
        self.co_lambda = args.co_lambda
        self.n_epoch = args.n_epoch
        self.train_dataset = train_dataset

        if args.model_type == "cnn":
            self.model1 = CNN(input_channel=input_channel,
                              n_outputs=num_classes)
            self.model2 = CNN(input_channel=input_channel,
                              n_outputs=num_classes)
        elif args.model_type == "mlp":
            self.model1 = MLPNet()
            self.model2 = MLPNet()

        self.model1.to(device)
        print(self.model1.parameters)

        self.model2.to(device)
        print(self.model2.parameters)

        self.optimizer = torch.optim.Adam(list(self.model1.parameters()) +
                                          list(self.model2.parameters()),
                                          lr=learning_rate)

        self.loss_fn = loss_jocor

        self.adjust_lr = args.adjust_lr
Example #14
    def __init__(self):
        img_width_range = cfg.img_width_range
        word_len = cfg.word_len
        self.batch_size = cfg.batch_size
        self.visualize = cfg.visualize
        gpu_device_id = '/gpu:' + str(cfg.gpu_id)
        if cfg.gpu_id == -1:
            gpu_device_id = '/cpu:0'
            print("Using CPU model!")
        with tf.device(gpu_device_id):
            self.img_data = tf.placeholder(tf.float32,
                                           shape=(None, 1, 32, None),
                                           name='img_data')
            self.zero_paddings = tf.placeholder(tf.float32,
                                                shape=(None, None, 512),
                                                name='zero_paddings')

        self.bucket_specs = [(int(math.floor(64 / 4)), int(word_len + 2)),
                             (int(math.floor(108 / 4)), int(word_len + 2)),
                             (int(math.floor(140 / 4)), int(word_len + 2)),
                             (int(math.floor(256 / 4)), int(word_len + 2)),
                             (int(math.floor(img_width_range[1] / 4)),
                              int(word_len + 2))]
        buckets = self.buckets = self.bucket_specs

        self.decoder_inputs = []
        self.encoder_masks = []
        self.target_weights = []
        with tf.device(gpu_device_id):
            for i in range(int(buckets[-1][0] + 1)):
                self.encoder_masks.append(
                    tf.placeholder(tf.float32,
                                   shape=[None, 1],
                                   name="encoder_mask{0}".format(i)))
            for i in range(buckets[-1][1] + 1):
                self.decoder_inputs.append(
                    tf.placeholder(tf.int32,
                                   shape=[None],
                                   name="decoder{0}".format(i)))
                self.target_weights.append(
                    tf.placeholder(tf.float32,
                                   shape=[None],
                                   name="weight{0}".format(i)))
        self.bucket_min_width, self.bucket_max_width = img_width_range
        self.image_height = cfg.img_height
        self.valid_target_len = cfg.valid_target_len
        self.forward_only = True

        self.bucket_data = {
            i: BucketData()
            for i in range(self.bucket_max_width + 1)
        }

        with tf.device(gpu_device_id):
            cnn_model = CNN(self.img_data, True)  #(not self.forward_only))
            self.conv_output = cnn_model.tf_output()
            self.concat_conv_output = tf.concat(
                axis=1, values=[self.conv_output, self.zero_paddings])

            self.perm_conv_output = tf.transpose(self.concat_conv_output,
                                                 perm=[1, 0, 2])

        with tf.device(gpu_device_id):
            self.attention_decoder_model = Seq2SeqModel(
                encoder_masks=self.encoder_masks,
                encoder_inputs_tensor=self.perm_conv_output,
                decoder_inputs=self.decoder_inputs,
                target_weights=self.target_weights,
                target_vocab_size=cfg.target_vocab_size,
                buckets=self.buckets,
                target_embedding_size=cfg.target_embedding_size,
                attn_num_layers=cfg.attn_num_layers,
                attn_num_hidden=cfg.attn_num_hidden,
                forward_only=self.forward_only,
                use_gru=cfg.use_gru)
        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.saver_all = tf.train.Saver(tf.global_variables())
        self.saver_all.restore(self.sess, cfg.ocr_model_path)
Example #15
    n_data, n_input = X.shape
    n_class = np.unique(t).size
    T = create_label(t, n_data, n_class)

    print('make train/test data')
    n_train, n_test = 1000, 50
    i = np.random.permutation(n_data)[:n_train+n_test]
    i_train, i_test = np.hsplit(i, [n_train])
    X_train, X_test = X[i_train, :].reshape(n_train, 1, 28, 28), X[i_test, :].reshape(n_test, 1, 28, 28)
    T_train, T_test = T[i_train, :], T[i_test, :]

    print('initialize...')
    linear, sigmoid, softmax, relu = act.linear(), act.sigmoid(), act.softmax(), act.relu()
    conv1, conv2 = conv(20, 1, 5, 5, relu), conv(50, 20, 5, 5, relu)
    pool1, pool2 = pool(2, 2, 2), pool(2, 2, 2)
    neural = NN(800, 500, 10, linear, sigmoid, softmax)
    error = err.cross_entropy()
    cnn = CNN(conv1, pool1, conv2, pool2, neural, error)

    print('train...')
    cnn.train(X_train, T_train, epsilon=0.005, lam=0.0001, gamma=0.9, s_batch=5, epochs=50)

    print('predict...')
    Y_test = cnn.predict(X_test)
    accuracy = cnn.accuracy(Y_test, T_test)
    print('accuracy: {0}'.format(accuracy))

    print('save figure of loss...')
    cnn.save_lossfig()

Example #16
    def __init__(self,
                 phase,
                 visualize,
                 output_dir,
                 batch_size,
                 initial_learning_rate,
                 steps_per_checkpoint,
                 model_dir,
                 target_embedding_size,
                 attn_num_hidden,
                 attn_num_layers,
                 clip_gradients,
                 max_gradient_norm,
                 session,
                 load_model,
                 gpu_id,
                 use_gru,
                 use_distance=True,
                 max_image_width=160,
                 max_image_height=60,
                 max_prediction_length=8,
                 channels=1,
                 reg_val=0):

        self.use_distance = use_distance

        # We need resized width, not the actual width
        max_resized_width = 1. * max_image_width / max_image_height * DataGen.IMAGE_HEIGHT

        self.max_original_width = max_image_width
        self.max_width = int(math.ceil(max_resized_width))

        self.encoder_size = int(math.ceil(1. * self.max_width / 4))
        self.decoder_size = max_prediction_length + 2
        self.buckets = [(self.encoder_size, self.decoder_size)]

        if gpu_id >= 0:
            device_id = '/gpu:' + str(gpu_id)
        else:
            device_id = '/cpu:0'
        self.device_id = device_id

        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        if phase == 'test':
            batch_size = 1

        logging.info('phase: %s', phase)
        logging.info('model_dir: %s', model_dir)
        logging.info('load_model: %s', load_model)
        logging.info('output_dir: %s', output_dir)
        logging.info('steps_per_checkpoint: %d', steps_per_checkpoint)
        logging.info('batch_size: %d', batch_size)
        logging.info('learning_rate: %f', initial_learning_rate)
        logging.info('reg_val: %d', reg_val)
        logging.info('max_gradient_norm: %f', max_gradient_norm)
        logging.info('clip_gradients: %s', clip_gradients)
        logging.info('max_image_width: %d', max_image_width)
        logging.info('max_prediction_length: %d', max_prediction_length)
        logging.info('channels: %d', channels)
        logging.info('target_embedding_size: %f', target_embedding_size)
        logging.info('attn_num_hidden: %d', attn_num_hidden)
        logging.info('attn_num_layers: %d', attn_num_layers)
        logging.info('visualize: %s', visualize)

        if use_gru:
            logging.info('using GRU in the decoder.')

        self.reg_val = reg_val
        self.sess = session
        self.steps_per_checkpoint = steps_per_checkpoint
        self.model_dir = model_dir
        self.output_dir = output_dir
        self.batch_size = batch_size
        self.global_step = tf.Variable(0, trainable=False)
        self.phase = phase
        self.visualize = visualize
        self.learning_rate = initial_learning_rate
        self.clip_gradients = clip_gradients
        self.channels = channels

        if phase == 'train':
            self.forward_only = False
        else:
            self.forward_only = True

        with tf.device(device_id):

            self.height = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.int32)
            self.height_float = tf.constant(DataGen.IMAGE_HEIGHT,
                                            dtype=tf.float64)

            self.img_pl = tf.placeholder(tf.string,
                                         name='input_image_as_bytes')
            self.img_data = tf.cond(tf.less(tf.rank(self.img_pl), 1),
                                    lambda: tf.expand_dims(self.img_pl, 0),
                                    lambda: self.img_pl)
            self.img_data = tf.map_fn(self._prepare_image,
                                      self.img_data,
                                      dtype=tf.float32)
            num_images = tf.shape(self.img_data)[0]

            # TODO: create a mask depending on the image/batch size
            self.encoder_masks = []
            for i in range(self.encoder_size + 1):
                self.encoder_masks.append(tf.tile([[1.]], [num_images, 1]))

            self.decoder_inputs = []
            self.target_weights = []
            for i in range(self.decoder_size + 1):
                self.decoder_inputs.append(tf.tile([1], [num_images]))
                if i < self.decoder_size:
                    self.target_weights.append(tf.tile([1.], [num_images]))
                else:
                    self.target_weights.append(tf.tile([0.], [num_images]))

            cnn_model = CNN(self.img_data, not self.forward_only)
            self.conv_output = cnn_model.tf_output()
            self.perm_conv_output = tf.transpose(self.conv_output,
                                                 perm=[1, 0, 2])
            self.attention_decoder_model = Seq2SeqModel(
                encoder_masks=self.encoder_masks,
                encoder_inputs_tensor=self.perm_conv_output,
                decoder_inputs=self.decoder_inputs,
                target_weights=self.target_weights,
                target_vocab_size=len(DataGen.CHARMAP),
                buckets=self.buckets,
                target_embedding_size=target_embedding_size,
                attn_num_layers=attn_num_layers,
                attn_num_hidden=attn_num_hidden,
                forward_only=self.forward_only,
                use_gru=use_gru)

            table = tf.contrib.lookup.MutableHashTable(
                key_dtype=tf.int64,
                value_dtype=tf.string,
                default_value="",
                checkpoint=True,
            )

            insert = table.insert(
                tf.constant(list(range(len(DataGen.CHARMAP))), dtype=tf.int64),
                tf.constant(DataGen.CHARMAP),
            )

            with tf.control_dependencies([insert]):
                num_feed = []
                prb_feed = []

                for line in range(len(self.attention_decoder_model.output)):
                    guess = tf.argmax(
                        self.attention_decoder_model.output[line], axis=1)
                    proba = tf.reduce_max(tf.nn.softmax(
                        self.attention_decoder_model.output[line]),
                                          axis=1)
                    num_feed.append(guess)
                    prb_feed.append(proba)

                # Join the predictions into a single output string.
                trans_output = tf.transpose(num_feed)
                trans_output = tf.map_fn(
                    lambda m: tf.foldr(
                        lambda a, x: tf.cond(
                            tf.equal(x, DataGen.EOS_ID),
                            lambda: '',
                            lambda: table.lookup(x) + a  # pylint: disable=undefined-variable
                        ),
                        m,
                        initializer=''),
                    trans_output,
                    dtype=tf.string)

                # Calculate the total probability of the output string.
                trans_outprb = tf.transpose(prb_feed)
                trans_outprb = tf.gather(trans_outprb,
                                         tf.range(tf.size(trans_output)))
                trans_outprb = tf.map_fn(lambda m: tf.foldr(
                    lambda a, x: tf.multiply(tf.cast(x, tf.float64), a),
                    m,
                    initializer=tf.cast(1, tf.float64)),
                                         trans_outprb,
                                         dtype=tf.float64)

                self.prediction = tf.cond(
                    tf.equal(tf.shape(trans_output)[0], 1),
                    lambda: trans_output[0],
                    lambda: trans_output,
                )
                self.probability = tf.cond(
                    tf.equal(tf.shape(trans_outprb)[0], 1),
                    lambda: trans_outprb[0],
                    lambda: trans_outprb,
                )

                self.prediction = tf.identity(self.prediction,
                                              name='prediction')
                self.probability = tf.identity(self.probability,
                                               name='probability')

            if not self.forward_only:  # train
                self.updates = []
                self.summaries_by_bucket = []

                params = tf.trainable_variables()
                opt = tf.train.AdadeltaOptimizer(
                    learning_rate=initial_learning_rate)
                loss_op = self.attention_decoder_model.loss

                if self.reg_val > 0:
                    reg_losses = tf.get_collection(
                        tf.GraphKeys.REGULARIZATION_LOSSES)
                    logging.info('Adding %s regularization losses',
                                 len(reg_losses))
                    logging.debug('REGULARIZATION_LOSSES: %s', reg_losses)
                    loss_op = self.reg_val * tf.reduce_sum(
                        reg_losses) + loss_op

                gradients, params = list(
                    zip(*opt.compute_gradients(loss_op, params)))
                if self.clip_gradients:
                    gradients, _ = tf.clip_by_global_norm(
                        gradients, max_gradient_norm)

                # Summaries for loss, variables, gradients, gradient norms and total gradient norm.
                summaries = [
                    tf.summary.scalar("loss", loss_op),
                    tf.summary.scalar("total_gradient_norm",
                                      tf.global_norm(gradients))
                ]
                all_summaries = tf.summary.merge(summaries)
                self.summaries_by_bucket.append(all_summaries)

                # update op - apply gradients
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    self.updates.append(
                        opt.apply_gradients(list(zip(gradients, params)),
                                            global_step=self.global_step))

        self.saver_all = tf.train.Saver(tf.all_variables())
        self.checkpoint_path = os.path.join(self.model_dir, "model.ckpt")

        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and load_model:
            # pylint: disable=no-member
            logging.info("Reading model parameters from %s",
                         ckpt.model_checkpoint_path)
            self.saver_all.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            logging.info("Created model with fresh parameters.")
            self.sess.run(tf.initialize_all_variables())
Example #17
from flask import Flask, request, jsonify, make_response
from flask_restful import Resource, Api
import pickle
import numpy as np
from model.cnn import CNN
import cv2

cnn = CNN("RGB")
app = Flask(__name__)
api = Api(app)


class Predict(Resource):
    def post(self):
        data = request.data
        img = cv2.imdecode(np.frombuffer(data, dtype=np.uint8),
                           cv2.IMREAD_COLOR)
        results = cnn.raw_predict(img)

        return make_response(jsonify(results), 201)


api.add_resource(Predict, '/predict')
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True, port=5000)
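
For reference, a client can call this endpoint by POSTing raw image bytes (a sketch assuming the server above is running locally on port 5000 and that a local file such as sample.jpg exists; both are assumptions):

import requests

with open('sample.jpg', 'rb') as f:  # hypothetical test image
    resp = requests.post('http://localhost:5000/predict', data=f.read())
print(resp.status_code, resp.json())  # expect 201 and the model's predictions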
Example #18
    path = images.paths[20000]
    image = images._getitem(path)
    print('read image {} of shape {}'.format(path, image.shape))


my_split = poses[0]
my_split = [path[:-4] for path in my_split]


"""Use SRGAN"""
srgan = generator()
srgan.load_weights('weights/srgan/gan_generator.h5')


"""Upload customed cnn model"""
cnn = CNN(256, 256, 3, 101)
cnn.load_weights('weights/custom/cnn_plus.h5')
plot_model(cnn, to_file='./model.png', show_shapes=True, show_layer_names=True)


train_model(2, 'cnn_plus', cnn, srgan)

#filepath="./cnn_weights.h5"
#checkpoint = ModelCheckpoint(filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
#callbacks_list = [checkpoint]

"""Prepare and train on a batch of data and labels, 10 iterations"""
for i in range(2):
    train_set = devide(24, 2, 2)
    X = tensor2numpy('./data/', train_set, srgan)
    x = [X[i] for i in X.keys()]
Example #19
def make_model(train_set_x, n_class):
    input_dim = train_set_x.shape[1:]
    layers = make_cnn(input_dim, n_class)
    cnn = CNN(layers)
    return cnn
Example #20
import cv2
from model.cnn import CNN
import imutils
import math

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    font = cv2.FONT_HERSHEY_SIMPLEX
    model = CNN("gray")
    while True:
        ret, img = cap.read()  # full image
        gray, results = model.raw_predict(img)
        print(results)
        cv2.imshow('gray', gray)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #21
class WassersteinGAN():
    def __init__(self, config):
        # get config args
        self.out_dir = config['output_directory']
        self.batch_size = config['batch_size']
        self.input_dims = config['input_dimensions']
        self.learn_rate = config['learning_rate']
        self.z_dim = config['z_dimension']
        self.name = config['model_name']
        self.verbosity = config['verbosity']
        self.num_train = config['number_train']
        self.num_test = config['number_test']

        # initialize logging to a new log file, recording INFO-level and higher events
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p',
                            filename='{}{}.log'.format(self.out_dir,
                                                       self.name),
                            filemode='w',
                            level=logging.INFO)

        # try to get gpu device, if not just use cpu
        self.device = \
            torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        # initialize critic (CNN) model
        self.critic = CNN(in_chan=self.input_dims[-1], out_dim=1, out_act=None)

        # initialize generator (TransposeCNN) model
        self.generator = TransposeCNN(in_dim=self.z_dim,
                                      out_chan=self.input_dims[-1],
                                      out_act=torch.nn.Tanh())

        # initialize zdim dimensional normal distribution to sample generator
        # inputs
        self.z_dist = torch.distributions.normal.Normal(
            torch.zeros(self.batch_size, self.z_dim),
            torch.ones(self.batch_size, self.z_dim))

        # initialize bs dimensional uniform distribution to sample eps vals for
        # creating interpolations
        self.eps_dist = torch.distributions.uniform.Uniform(
            torch.zeros(self.batch_size, 1, 1, 1),
            torch.ones(self.batch_size, 1, 1, 1))

        # sample a batch of z to have constant set of generator inputs
        #  as model trains
        self.z_const = self.z_dist.sample()[:64].to(self.device)

        # initialize critic and generator optimizers
        self.crit_opt = torch.optim.Adam(self.critic.parameters(),
                                         lr=self.learn_rate)
        self.gen_opt = torch.optim.Adam(self.generator.parameters(),
                                        lr=self.learn_rate)

        # move tensors to device
        self.critic.to(self.device)
        self.generator.to(self.device)

    def print_structure(self):
        print('[INFO] \'{}\' critic structure \n{}'.format(
            self.name, self.critic))
        print('[INFO] \'{}\' generator structure \n{}'.format(
            self.name, self.generator))

    @staticmethod
    def compute_losses(real_crit_out, fake_crit_out, crit_grad):
        # compute wasserstein distance estimate
        wass_dist = torch.mean(real_crit_out - fake_crit_out)

        # compute mean of normed critic gradients
        crit_grad_mean_norm = torch.mean(torch.norm(crit_grad, p=2,
                                                    dim=(2, 3)))

        # lagrangian multiplier for critic gradient penalty
        # (push crit_grad_mean_norm -> 1)
        crit_grad_penalty = (crit_grad_mean_norm - 1.)**2

        # compute generator loss
        gen_loss = wass_dist

        # compute critic loss with lambda=10 weighted critic gradient penalty
        crit_loss = (10.0 * crit_grad_penalty) - wass_dist

        return gen_loss, crit_loss, wass_dist, crit_grad_penalty

    def generate_samples_and_tile(self, z):
        # generate a batch of fake images from the z input
        fake_img_batch = self.generator(z)

        # detach, move to cpu, and convert images to numpy
        fake_img_batch = fake_img_batch.detach().cpu().numpy()

        # move channel dim to last dim of tensor
        fake_img_batch = np.transpose(fake_img_batch, [0, 2, 3, 1])

        # construct tiled image (squeeze to remove channel dim for grayscale)
        fake_img_tiled = np.squeeze(tile_images(fake_img_batch))

        return fake_img_tiled

    def train(self, dataloader, num_epochs):
        # iterate through epochs
        for e in range(num_epochs):

            # accumulator for wasserstein distance over an epoch
            running_w_dist = 0.0

            # iterate through batches
            for i, batch in enumerate(dataloader):

                # get images from batch
                real_img_batch = batch['image'].to(self.device)

                # get number of samples in batch
                bs = real_img_batch.shape[0]

                # sample from z and eps distribution and clip based on number
                # of samples in batch
                z_sample = self.z_dist.sample()[:bs].to(self.device)
                eps_sample = self.eps_dist.sample()[:bs].to(self.device)

                # generate batch of fake images by feeding sampled z through
                # generator
                fake_img_batch = self.generator(z_sample)

                # compute batch of images by interpolating eps_sample amount
                # between real and fake
                # (generated) images
                int_img_batch = (eps_sample * real_img_batch) + \
                                ((1. - eps_sample) * fake_img_batch)

                # compute critic outputs from real, fake, and interpolated
                # image batches
                real_crit_out = self.critic(real_img_batch)
                fake_crit_out = self.critic(fake_img_batch)
                int_crit_out = self.critic(int_img_batch)

                # compute gradient of critic output w.r.t interpolated image
                # inputs
                crit_grad = torch.autograd.grad(
                    outputs=int_crit_out,
                    inputs=int_img_batch,
                    grad_outputs=torch.ones_like(int_crit_out),
                    create_graph=True,
                    retain_graph=True,
                    only_inputs=True)[0]

                # compute losses
                gen_loss, crit_loss, w_dist, grad_pen = self.compute_losses(
                    real_crit_out, fake_crit_out, crit_grad)

                # NOTE: Currently must update critic and generator separately.
                # If both are updated within the same loop, either updating
                # doesn't happen, or an inplace operator error occurs which
                # prevents gradient computation, depending on the ordering of
                # the zero_grad(), backward(), step() calls. ???

                if i % 10 == 9:
                    # update just the generator (every 10th step)
                    self.gen_opt.zero_grad()
                    gen_loss.backward()
                    self.gen_opt.step()

                    if self.verbosity:
                        # generate const batch of fake samples and tile
                        fake_img_tiled = self.generate_samples_and_tile(
                            self.z_const)

                        # save tiled image
                        plt.imsave(
                            '{}{}_gen_step_{}.png'.format(
                                self.out_dir, self.name,
                                (e *
                                 (int(self.num_train / self.batch_size) + 1)) +
                                i), fake_img_tiled)
                else:
                    # update just the critic
                    self.crit_opt.zero_grad()
                    crit_loss.backward()
                    self.crit_opt.step()

                # accumulate running wasserstein distance
                running_w_dist += w_dist.item()

            # done with current epoch

            # compute average wasserstein distance over epoch
            epoch_avg_w_dist = running_w_dist / (i + 1)

            # log epoch stats info
            logging.info(
                '| epoch: {:3} | wasserstein distance: {:6.2f} | gradient '
                'penalty: {:6.2f} |'.format(e + 1, epoch_avg_w_dist, grad_pen))

            # new sample from z dist
            z_sample = self.z_dist.sample()[:64].to(self.device)

            # generate const batch of fake samples and tile
            fake_img_tiled = self.generate_samples_and_tile(z_sample)

            if self.verbosity:
                # save tiled image
                plt.imsave(
                    '{}{}_gen_epoch_{}.png'.format(self.out_dir, self.name,
                                                   e + 1), fake_img_tiled)

            # save current state of generator and critic
            torch.save(self.generator.state_dict(),
                       '{}{}_generator.pt'.format(self.out_dir, self.name))
            torch.save(self.critic.state_dict(),
                       '{}{}_critic.pt'.format(self.out_dir, self.name))
Example #22
def main(args):
    # Enter all arguments that you want to be in the filename of the saved output
    ordered_args = [
        'dataset',
        'data_augmentation',
        'seed',
        'remove_percent',
        'burn_in_epochs',
        'remove_strategy',
        'noise_percent',
        'noise_labels',
        'noise_pixels_percent',
        'noise_pixels_std',
        'optimizer',
        'learning_rate',
    ]
    save_fname = '__'.join('{}_{}'.format(arg, args_dict[arg])
                           for arg in ordered_args)
    fname = os.path.join(args.output_dir, save_fname)
    if os.path.exists(fname + '__stats_dict.pkl'):
        redo = input(
            "There exists experiment result already, continue? [yes/no] ")
        if redo == 'no':
            exit()
        elif redo == 'yes':
            pass
        else:
            raise ValueError('wrong answer')

    os.makedirs(args.output_dir, exist_ok=True)

    # Set appropriate devices
    device = torch.device(args.device)
    print('run on device: {0}'.format(device))
    cudnn.benchmark = True  # Should make training go faster for large models

    # Set random seed for initialization
    torch.manual_seed(args.seed)
    if 'cuda' in args.device:
        torch.cuda.manual_seed(args.seed)
    npr.seed(args.seed)

    train_ds, test_ds, num_classes = get_data(args.dataset)

    if args.noise_percent > 0:
        assert not (args.noise_labels and (args.noise_pixels_percent > 0))
        if args.noise_labels:
            train_ds, noise_indexes = noise_labels(train_ds,
                                                   args.noise_percent, fname)
        if args.noise_pixels_percent:
            train_ds, noise_indexes = noise_pixels(train_ds,
                                                   args.noise_percent,
                                                   args.noise_pixels_percent,
                                                   args.noise_pixels_std,
                                                   fname)

    print('Training on ' + str(len(train_ds)) + ' examples')

    # Setup model
    if args.model == 'resnet18':
        model = ResNet18(num_classes=num_classes)
    elif args.model == 'wideresnet':
        if args.dataset == 'svhn':
            model = WideResNet(depth=16,
                               num_classes=num_classes,
                               widen_factor=8,
                               dropRate=0.4)
        else:
            model = WideResNet(depth=28,
                               num_classes=num_classes,
                               widen_factor=10,
                               dropRate=0.3)
    elif args.model == 'cnn':
        model = CNN(num_classes=num_classes)
    else:
        print(
            'Specified model not recognized. Options are: resnet18, wideresnet and cnn'
        )

    # Setup loss
    model = model.to(args.device)
    criterion = torch.nn.CrossEntropyLoss(reduction='none').cuda()

    # Setup optimizer
    if args.optimizer == 'adam':
        model_optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    elif args.optimizer == 'sgd':
        model_optimizer = torch.optim.SGD(model.parameters(),
                                          lr=args.learning_rate,
                                          momentum=0.9,
                                          nesterov=True,
                                          weight_decay=5e-4)
        scheduler = MultiStepLR(model_optimizer,
                                milestones=[60, 120, 160],
                                gamma=0.2)
    elif args.optimizer == 'sgd-const-lr':
        model_optimizer = torch.optim.SGD(model.parameters(),
                                          lr=args.learning_rate,
                                          momentum=0.9,
                                          nesterov=True,
                                          weight_decay=5e-4)
    else:
        print('Specified optimizer not recognized. Options are: adam, sgd and sgd-const-lr')

    save_point = os.path.join(args.output_dir, 'checkpoint', args.dataset)
    os.makedirs(save_point, exist_ok=True)
    checkpoint_fname = os.path.join(save_point, save_fname + '.t7')

    # Initialize dictionary to save statistics for every example presentation
    example_stats = {}
    num_examples = len(train_ds)
    example_weights = np.ones(num_examples)

    elapsed_time = 0
    # train_idx = np.array(range(0, len(train_ds)))
    train_loader = DataLoader(train_ds,
                              batch_size=args.batch_size,
                              shuffle=True)
    for epoch in range(args.epochs):
        if args.remove_strategy != 'normal' and epoch >= args.burn_in_epochs:
            if 'sampling' in args.remove_strategy:
                # sampling by weight
                normalized_weights = example_weights / example_weights.sum()
                index_stats = example_stats.get('example_weights', [[], []])
                index_stats[1].append(normalized_weights)
                example_stats['example_weights'] = index_stats

                choice_num = int(num_examples *
                                 (1 - args.remove_percent / 100))
                train_idx = np.random.choice(range(num_examples),
                                             size=choice_num,
                                             replace=False,
                                             p=normalized_weights)
            elif args.remove_strategy == 'low-acc':
                remove_n = int(args.remove_percent * num_examples / 100)
                losses = []
                for idx in range(num_examples):
                    losses.append(example_stats[idx][0][epoch - 1])
                losses = np.array(losses)
                sorted_indexes = np.argsort(losses)
                train_idx = sorted_indexes[:num_examples - remove_n]
            elif args.remove_strategy == 'all-noise':
                remove_n = int(args.remove_percent * num_examples / 100)
                if args.remove_percent <= args.noise_percent_labels:
                    remove_indexes = npr.choice(noise_indexes,
                                                remove_n,
                                                replace=False)
                    train_idx = np.setdiff1d(range(num_examples),
                                             remove_indexes)
                else:
                    train_idx = np.setdiff1d(range(num_examples),
                                             noise_indexes)
                    train_idx = npr.choice(train_idx,
                                           num_examples - remove_n,
                                           replace=False)
            else:
                # event method
                _, unlearned_per_presentation, _, first_learned = compute_forgetting_statistics(
                    example_stats, epoch)
                ordered_examples, ordered_values = sort_examples_by_forgetting(
                    [unlearned_per_presentation], [first_learned], epoch)
                train_idx = sample_dataset_by_forgetting(
                    train_ds, ordered_examples, ordered_values,
                    args.remove_percent, args.remove_strategy)
            sampler = torch.utils.data.SubsetRandomSampler(train_idx)
            train_loader = DataLoader(train_ds,
                                      batch_size=args.batch_size,
                                      sampler=sampler)

        start_time = time.time()
        train(args, model, criterion, device, train_loader, model_optimizer,
              epoch, example_stats)

        test_loader = DataLoader(test_ds, batch_size=32, shuffle=True)
        test(epoch, model, criterion, device, test_loader, example_stats,
             checkpoint_fname)

        if args.remove_strategy != 'normal' and epoch >= args.burn_in_epochs:
            # evaluate on removed data
            removed_idx = np.setdiff1d(range(num_examples), train_idx)
            sampler = torch.utils.data.SubsetRandomSampler(removed_idx)
            removed_loader = DataLoader(train_ds,
                                        batch_size=args.batch_size,
                                        sampler=sampler)
            evaluate_on_removed(model, criterion, device, removed_loader,
                                epoch, example_stats)

        if 'sampling' in args.remove_strategy:
            example_weights = update_example_weights(example_weights,
                                                     example_stats, epoch,
                                                     args.remove_strategy)

        epoch_time = time.time() - start_time
        elapsed_time += epoch_time
        print('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))

        # Update optimizer step
        if args.optimizer == 'sgd':
            scheduler.step(epoch)

        # Save the stats dictionary
        fname = os.path.join(args.output_dir, save_fname)
        with open(fname + "__stats_dict.pkl", "wb") as f:
            pickle.dump(example_stats, f)

        # Log the best train and test accuracy so far
        with open(fname + "__best_acc.txt", "w") as f:
            f.write('train test \n')
            f.write(str(max(example_stats['train'][1])))
            f.write(' ')
            f.write(str(max(example_stats['test'][1])))
Example #23
    layers = [conv1, relu1, maxpool1, flatten, dense1]
    return layers


if __name__ == '__main__':

    if len(sys.argv) == 2:
        action = sys.argv[1]
        if action == '--debug':
            # run gradient check on two random data points.
            grad_check()
    else:

        train_set_x, train_set_y, test_set_x, test_set_y, classes = load_dataset()
        num_of_classes = len(classes)
        train_set_x = train_set_x / 255
        test_set_x = test_set_x / 255

        input_dim = train_set_x.shape[1:]
        layers = make_cnn(input_dim, num_of_classes)
        cnn = CNN(layers)
        cnn, costs = Adam(model=cnn,
                          X_train=train_set_x,
                          y_train=train_set_y,
                          epoch=100,
                          learning_rate=0.001,
                          X_test=test_set_x,
                          y_test=test_set_y,
                          minibatch_size=64).minimize()
Example #24
                                torch.IntTensor([l]),
                                raw=raw))
                index += l
            return texts

    def label_dict(self):

        return self.dict

    def label_constant(self):

        return self.alphabet


if __name__ == '__main__':

    sys.path.append('../')
    from model.cnn import CNN

    img = Image.open('../test/ocr/test.jpg')

    alphabet = process_alphabet('../weight/ocr/ocr.json')

    model = CNN(1).cuda()
    checkpoint = torch.load('../weight/ocr/ocr.pth.tar')
    model.load_state_dict(checkpoint)

    res = predict(model, img, alphabet)

    print(res)
Example #25
from main import text_ocr
from helper.image import read_url_img, base64_to_PIL, get_now
from config import ocrPath, textPath, GPU

# load model
import torch
from model.vgg import VGG
from model.cnn import CNN

textModel = VGG(3).cuda() if GPU else VGG(3)
checkpoint = torch.load(textPath)
textModel.load_state_dict(checkpoint)
print(textModel)

ocrModel = CNN(1).cuda() if GPU else CNN(1)
checkpoint = torch.load(ocrPath)
ocrModel.load_state_dict(checkpoint)
print(ocrModel)

billList = []
root = './test/'
timeOutTime = 5


def job(uid, url, imgString, iscut, isclass, billModel, ip):
    now = get_now()
    if url is not None:
        img = read_url_img(url)
    elif imgString is not None:
        img = base64_to_PIL(imgString)