Example #1
0
    def main(self):
        """Run Inception v3 inference over ``self.files`` and report accuracy.

        For each preprocessed image, the top-``self.top_k`` predicted label
        names are checked for ``self.keyword``; each hit increments
        ``self.accuracy``. Optionally writes the final accuracy ratio to
        'Inception_v3_Results.txt'.

        NOTE(review): assumes ``self.image_preprocessor`` yields
        (image, preprocessed_tensor) tuples — confirm against its definition.
        """
        with tf.Graph().as_default():
            # map() returns a one-shot iterator in Python 3; it is consumed
            # exactly once by the loop below.
            self.processed_tensor_list = map(
                self.image_preprocessor, self.files)

            for tensor_tuple in self.processed_tensor_list:
                # Create the model, using the default arg scope to configure
                # the batch-norm parameters.
                with self.slim.arg_scope(inception3.inception_v3_arg_scope()):
                    logits, _ = inception3.inception_v3(
                        tensor_tuple[1], num_classes=1001, is_training=False)
                # Append a tuple (image, probability).
                self.image_and_probabilities.append(
                    (tensor_tuple[0], tf.nn.softmax(logits)))

            self.init_fn = self.slim.assign_from_checkpoint_fn(
                os.path.join(self.checkpoints_dir, 'inception_v3.ckpt'),
                self.slim.get_model_variables('InceptionV3'))

            with tf.Session() as sess:
                self.init_fn(sess)
                for idx in range(self.dim):
                    print('Classifying on image' + str(idx))
                    _, probabilities = sess.run(
                        [self.image_and_probabilities[idx][0],
                         self.image_and_probabilities[idx][1]])
                    probabilities = probabilities[0, 0:]
                    # Indices of classes sorted by descending probability.
                    sorted_inds = [i[0] for i in sorted(
                        enumerate(-probabilities), key=lambda x: x[1])]

                    names = imagenet.create_readable_names_for_imagenet_labels()

                    temp_array = []
                    for i in range(self.top_k):
                        index = sorted_inds[i]
                        temp_array.append(names[index])
                        if self.print_flag:
                            print('Probability %0.2f%% => [%s]' % (
                                probabilities[index], names[index]))
                    # Count a hit when the keyword appears in any top-k label.
                    if any(self.keyword in s for s in temp_array):
                        self.accuracy += 1

        # BUG FIX: tf.divide() builds a graph-mode Tensor, so str() of it
        # printed the Tensor repr rather than the value; use plain Python
        # division on the two Python numbers instead.
        accuracy_ratio = self.accuracy / self.dim
        print('Classification Accuracy ====> ' + str(accuracy_ratio))

        if self.save_result_to_file:
            # BUG FIX: the file was opened in binary mode ('wb') while str
            # data was written, which raises TypeError on Python 3.
            with open('Inception_v3_Results.txt', 'w') as f:
                f.write('Classification Accuracy\n')
                f.write(str(accuracy_ratio))
Example #2
0
def get_model(cfg, pretrained=False, load_param_from_ours=False):
    """Build the network selected by ``cfg.model`` and wrap it in DataParallel.

    Args:
        cfg: configuration object; reads ``model``, ``num_classes``,
            ``model_info`` (VGG/ResNet depth), ``patch_size`` (custom net),
            ``gpu_id`` and ``init_model_file``.
        pretrained: load library pretrained weights; forced to False when
            ``load_param_from_ours`` is set, since our checkpoint supersedes
            them.
        load_param_from_ours: restore parameters from ``cfg.init_model_file``.

    Returns:
        The model moved to GPU and wrapped in ``torch.nn.DataParallel``.

    Exits:
        Calls ``sys.exit(-1)`` when no model could be constructed.
    """
    if load_param_from_ours:
        # Our own checkpoint supersedes any downloaded pretrained weights.
        pretrained = False

    model = None
    num_classes = cfg.num_classes
    if cfg.model == 'custom':
        from models import custom_net
        if cfg.patch_size == 64:
            model = custom_net.net_64(num_classes=num_classes)
        elif cfg.patch_size == 32:
            model = custom_net.net_32(num_classes=num_classes)
        else:
            print('Do not support present patch size %s' % cfg.patch_size)
    elif cfg.model == 'googlenet':
        from models import inception_v3
        model = inception_v3.inception_v3(pretrained=pretrained,
                                          num_classes=num_classes)
    elif cfg.model == 'vgg':
        from models import vgg
        # Dispatch on depth instead of a duplicated if/elif ladder.
        builder = {19: vgg.vgg19_bn, 16: vgg.vgg16_bn}.get(cfg.model_info)
        if builder is not None:
            model = builder(pretrained=pretrained, num_classes=num_classes)
    elif cfg.model == 'resnet':
        from models import resnet
        builder = {18: resnet.resnet18, 34: resnet.resnet34,
                   50: resnet.resnet50, 101: resnet.resnet101}.get(cfg.model_info)
        if builder is not None:
            model = builder(pretrained=pretrained, num_classes=num_classes)
    if model is None:
        # Unknown model name, or an unsupported depth/patch size above.
        print('not support :' + cfg.model)
        sys.exit(-1)

    if load_param_from_ours:
        print('loading pretrained model from {0}'.format(cfg.init_model_file))
        checkpoint = torch.load(cfg.init_model_file)
        model.load_state_dict(checkpoint['model_param'])

    model.cuda()
    print('shift model to parallel!')
    model = torch.nn.DataParallel(model, device_ids=cfg.gpu_id)
    return model
Example #3
0
def get_model(cfg, pretrained=True, load_param_from_folder=False):
    """Construct the classifier named by ``cfg.model`` and parallelize it.

    When ``load_param_from_folder`` is true, pretrained weights are skipped
    and parameters are instead restored from ``cfg.init_model_file``.
    Exits the process (status -1) if no model can be built.
    """
    if load_param_from_folder:
        pretrained = False

    num_classes = cfg.num_classes
    arch = cfg.model
    model = None

    if arch == 'googlenet':
        from models import inception_v3
        model = inception_v3.inception_v3(
            pretrained=pretrained, num_classes=num_classes)
    elif arch == 'vgg':
        from models import vgg
        # Pick the constructor by depth; unknown depths leave model as None.
        make = {19: vgg.vgg19_bn, 16: vgg.vgg16_bn}.get(cfg.model_info)
        if make is not None:
            model = make(pretrained=pretrained, num_classes=num_classes)
    elif arch == 'resnet':
        from models import resnet
        make = {18: resnet.resnet18, 34: resnet.resnet34,
                50: resnet.resnet50, 101: resnet.resnet101}.get(cfg.model_info)
        if make is not None:
            model = make(pretrained=pretrained, num_classes=num_classes)

    if model is None:
        print('not support :' + cfg.model)
        sys.exit(-1)

    if load_param_from_folder:
        print('loading pretrained model from {0}'.format(cfg.init_model_file))
        checkpoint = torch.load(cfg.init_model_file)
        model.load_state_dict(checkpoint['model_param'])

    print('shift model to parallel!')
    return torch.nn.DataParallel(model, device_ids=cfg.gpu_id)
Example #4
0
classes = ['normal', 'monotone', 'screenshot', 'unknown']
config = {
    'batch_size':
    64,
    'train_split':
    0.8,
    'lr':
    0.000005,
    'n_epochs':
    100,
    'print_every':
    1,
    'cuda':
    True if cuda.is_available() else False,
    'model':
    inception_v3(pretrained=True, freeze=True),
    'criterion':
    nn.NLLLoss(),
    'transform':
    transforms.Compose([
        transforms.Resize(size=299),
        # transforms.RandomRotation(degrees=15),
        # transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        # transforms.RandomResizedCrop(size=299),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'early_stop':
    100,
    'balance':
graph = tf.Graph()  # no necessiry

with graph.as_default():

    # 1. Construct a graph representing the model.
    x = tf.placeholder(tf.float32, [None, 784])  # Placeholder for input.
    y = tf.placeholder(tf.float32, [None, 10])  # Placeholder for labels.

    NN = 1  # type of neural network

    if NN == 1:  # SLIM inception

        x_images = tf.reshape(x, [-1, 28, 28, 1])
        x_images = tf.image.resize_images(x_images, [299, 299])
        logits, end_points = inception.inception_v3(x_images,
                                                    num_classes=10,
                                                    is_training=True)

    if NN == 2:  # SLIM simple NN

        print('use slim')

        def lenet(images):
            net = slim.conv2d(images, 20, [5, 5], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.conv2d(net, 50, [5, 5], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.flatten(net, scope='flatten3')
            net = slim.fully_connected(net, 500, scope='fc4')
            net = slim.fully_connected(net,
                                       10,