Пример #1
0
def get_model_and_checkpoint(model, dataset, checkpoint_path, n_gpu=1):
    """Build a model for the given dataset, optionally restore a checkpoint,
    and move it onto the available GPU(s).

    Args:
        model: architecture name, 'mobilenet' or 'mobilenetv2'.
        dataset: 'imagenet' (1000 classes) or 'cifar10' (10 classes).
        checkpoint_path: optional path to a checkpoint or bare state_dict.
        n_gpu: number of GPUs to use; 0 keeps the model on CPU.

    Returns:
        The constructed network, wrapped in DataParallel when n_gpu > 1.

    Raises:
        NotImplementedError: unsupported model or dataset name.
    """
    # Resolve the class count once instead of enumerating every
    # (model, dataset) combination as separate branches.
    if dataset == 'imagenet':
        n_class = 1000
    elif dataset == 'cifar10':
        n_class = 10
    else:
        raise NotImplementedError

    if model == 'mobilenet':
        from mobilenet import MobileNet
        net = MobileNet(n_class=n_class)
    elif model == 'mobilenetv2':
        from mobilenet_v2 import MobileNetV2
        net = MobileNetV2(n_class=n_class)
    else:
        raise NotImplementedError

    if checkpoint_path:
        print('loading {}...'.format(checkpoint_path))
        sd = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        if 'state_dict' in sd:  # a checkpoint but not a state_dict
            sd = sd['state_dict']
        # Strip the 'module.' prefix left by DataParallel-saved checkpoints.
        sd = {k.replace('module.', ''): v for k, v in sd.items()}
        net.load_state_dict(sd)

    if torch.cuda.is_available() and n_gpu > 0:
        net = net.cuda()
        if n_gpu > 1:
            net = torch.nn.DataParallel(net, range(n_gpu))

    return net
Пример #2
0
def run():
    """Load the cached MobileNetV2 checkpoint and launch the state search."""
    start_time = time.time()
    print('net_cache : ', args.net_cache)

    criterion = nn.CrossEntropyLoss().cuda()
    model = nn.DataParallel(MobileNetV2().cuda())

    # Bail out early when there is no checkpoint to search from.
    if not os.path.exists(args.net_cache):
        print('can not find {} '.format(args.net_cache))
        return

    print('loading checkpoint {} ..........'.format(args.net_cache))
    checkpoint = torch.load(args.net_cache)
    best_top1_acc = checkpoint['best_top1_acc']
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint {} epoch = {}".format(args.net_cache,
                                                   checkpoint['epoch']))

    num_states = 17
    search(model, criterion, num_states)

    elapsed = time.time() - start_time
    print('total searching time = {:.2f} hours'.format(elapsed / 3600),
          flush=True)
Пример #3
0
def load_model(args):
    """Build the Keras ImageNet classifier selected by ``args.model``.

    Returns:
        (model, preprocess_mode): ``preprocess_mode`` is 'tf' or 'caffe',
        matching the preprocessing the chosen backbone was trained with.

    Raises:
        ValueError: if ``args.model`` names an unknown architecture.  The
        original if/elif chain only printed a message, leaving ``model``
        unbound and crashing with UnboundLocalError at the return.
    """
    # model name -> (constructor, preprocessing mode)
    builders = {
        'inception': (InceptionV3, 'tf'),
        'xception': (Xception, 'tf'),
        'inceptionresnet': (InceptionResNetV2, 'tf'),
        'mobilenet': (MobileNet, 'tf'),
        'mobilenet2': (MobileNetV2, 'tf'),
        'nasnet': (NASNetLarge, 'tf'),
        'resnet': (ResNet50, 'caffe'),
        'vgg16': (VGG16, 'caffe'),
        'vgg19': (VGG19, 'caffe'),
    }
    if args.model not in builders:
        print ("Model not found")
        raise ValueError('unknown model: {}'.format(args.model))

    constructor, preprocess_mode = builders[args.model]
    model = constructor(include_top=True, weights='imagenet')
    return model, preprocess_mode
Пример #4
0
def get_model_and_checkpoint(model, dataset, checkpoint_path, n_gpu=1):
    """Construct the requested network, optionally restore weights from a
    checkpoint file, and place it on the configured GPU(s)."""
    dataset_classes = {'imagenet': 1000, 'cifar10': 10}
    if dataset not in dataset_classes:
        raise ValueError('unsupported dataset')
    n_class = dataset_classes[dataset]

    if model == 'mobilenet':
        from mobilenet import MobileNet
        net = MobileNet(n_class=n_class)
    elif model == 'mobilenetv2':
        from mobilenet_v2 import MobileNetV2
        net = MobileNetV2(n_class=n_class)
    elif model.startswith('resnet'):
        # Swap the pretrained ImageNet head for one matching n_class.
        net = resnet.__dict__[model](pretrained=True)
        net.fc = nn.Linear(net.fc.in_features, n_class)
    else:
        raise NotImplementedError

    if checkpoint_path:
        print('loading {}...'.format(checkpoint_path))
        sd = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        if 'state_dict' in sd:  # a checkpoint but not a state_dict
            sd = sd['state_dict']
        # Drop the 'module.' prefix produced by DataParallel-saved weights.
        sd = {k.replace('module.', ''): v for k, v in sd.items()}
        net.load_state_dict(sd)

    if torch.cuda.is_available() and n_gpu > 0:
        net = net.cuda()
        if n_gpu > 1:
            net = torch.nn.DataParallel(net, range(n_gpu))

    return net
Пример #5
0
def get_model(args):
    """Build the network named by ``args.model_type`` for ``args.dataset``,
    optionally restore a checkpoint (and apply NNI mask speedup), then
    distribute it over the configured GPUs."""
    print('=> Building model..')

    class_counts = {'imagenet': 1000, 'cifar10': 10}
    if args.dataset not in class_counts:
        raise NotImplementedError
    n_class = class_counts[args.dataset]

    if args.model_type == 'mobilenet':
        net = MobileNet(n_class=n_class)
    elif args.model_type == 'mobilenetv2':
        net = MobileNetV2(n_class=n_class)
    elif args.model_type.startswith('resnet'):
        net = resnet.__dict__[args.model_type](pretrained=True)
        net.fc = nn.Linear(net.fc.in_features, n_class)
    else:
        raise NotImplementedError

    if args.ckpt_path is not None:
        # the checkpoint can be state_dict exported by amc_search.py or saved by amc_train.py
        print('=> Loading checkpoint {} ..'.format(args.ckpt_path))
        net.load_state_dict(torch.load(args.ckpt_path, torch.device('cpu')))
        if args.mask_path is not None:
            # Run ModelSpeedup with the masks at mask_path, using a dummy
            # input sized for the dataset (224 for ImageNet, 32 for CIFAR-10).
            side = 224 if args.dataset == 'imagenet' else 32
            dummy_input = torch.randn(2, 3, side, side)
            ms = ModelSpeedup(net, dummy_input, args.mask_path,
                              torch.device('cpu'))
            ms.speedup_model()

    net.to(args.device)
    if torch.cuda.is_available() and args.n_gpu > 1:
        net = torch.nn.DataParallel(net, list(range(args.n_gpu)))
    return net
Пример #6
0
def create_model(model_type=None,
                 n_classes=120,
                 input_size=224,
                 checkpoint=None,
                 pretrained=False,
                 width_mult=1.):
    """Create a MobileNet variant and optionally load a state_dict.

    Args:
        model_type: 'mobilenet_v1', 'mobilenet_v2', 'mobilenet_v2_torchhub',
            or None (returns None).
        n_classes: size of the classification head.
        input_size: input resolution (mobilenet_v2 only).
        checkpoint: optional path to a state_dict to load.
        pretrained: download pretrained weights (torchhub variant only).
        width_mult: width multiplier (mobilenet_v2 only).

    Returns:
        The constructed model, or None when model_type is None.

    Raises:
        RuntimeError: unknown model_type, or a checkpoint was supplied
            without a model to load it into.
    """
    if model_type == 'mobilenet_v1':
        model = MobileNet(n_class=n_classes, profile='normal')
    elif model_type == 'mobilenet_v2':
        model = MobileNetV2(n_class=n_classes,
                            input_size=input_size,
                            width_mult=width_mult)
    elif model_type == 'mobilenet_v2_torchhub':
        model = torch.hub.load('pytorch/vision:v0.8.1',
                               'mobilenet_v2',
                               pretrained=pretrained)
        # model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=pretrained)
        # Replace the stock ImageNet head with one sized for n_classes.
        feature_size = model.classifier[1].weight.data.size()[1]
        replace_classifier = torch.nn.Linear(feature_size, n_classes)
        model.classifier[1] = replace_classifier
    elif model_type is None:
        model = None
    else:
        raise RuntimeError('Unknown model_type.')

    if checkpoint is not None:
        if model is None:
            # Previously this fell through and crashed with an opaque
            # AttributeError on NoneType; fail with a clear message instead.
            raise RuntimeError('checkpoint given but model_type is None.')
        model.load_state_dict(torch.load(checkpoint))

    return model
Пример #7
0
def get_model(args):
    """Instantiate the requested MobileNet variant on GPU and optionally
    restore it from ``args.ckpt_path`` (either a dict-style checkpoint or a
    pickled whole-model object)."""
    print('=> Building model..')

    try:
        n_class = {'imagenet': 1000, 'cifar10': 10}[args.dataset]
    except KeyError:
        raise NotImplementedError

    if args.model_type == 'mobilenet':
        net = MobileNet(n_class=n_class).cuda()
    elif args.model_type == 'mobilenetv2':
        net = MobileNetV2(n_class=n_class).cuda()
    else:
        raise NotImplementedError

    if args.ckpt_path is not None:
        # the checkpoint can be a saved whole model object exported by amc_search.py, or a state_dict
        print('=> Loading checkpoint {} ..'.format(args.ckpt_path))
        loaded = torch.load(args.ckpt_path)
        # NOTE: exact `type(...) == dict` check kept on purpose -- an
        # OrderedDict payload must still be treated as a whole model here.
        if type(loaded) == dict:
            net.load_state_dict(loaded['state_dict'])
        else:
            net = loaded

    net.to(args.device)
    if torch.cuda.is_available() and args.n_gpu > 1:
        net = torch.nn.DataParallel(net, list(range(args.n_gpu)))
    return net
Пример #8
0
    def __init__(self, prev_steps=6, pred_steps=30, n_hid=256, mb2=None):
        """Encoder combining an image embedding with a past-trajectory
        embedding to regress ``pred_steps`` future values.

        Args:
            prev_steps: number of observed trajectory steps (each step
                contributes 2 values -- presumably (x, y); confirm in caller).
            pred_steps: number of future steps to predict.
            n_hid: hidden embedding width.
            mb2: optional shared MobileNetV2 backbone; a fresh one is
                constructed when None.
        """
        super(CNNEncoder, self).__init__()
        self.pred_steps = pred_steps
        # Reuse a caller-provided backbone so multiple encoders can share weights.
        self.image_encoder = MobileNetV2() if mb2 is None else mb2
        # 4096 is assumed to be the backbone's flattened feature size -- TODO confirm.
        self.image_emb = nn.Linear(4096, n_hid)
        self.prev_traj_emb = nn.Linear(prev_steps * 2, n_hid)

        self.fusion = nn.Linear(n_hid * 2, n_hid)
        self.pred_fc = nn.Linear(n_hid, pred_steps * 2)
Пример #9
0
    def __init__(self, norm_layer, num_filters=128, pretrained=True):
        """Creates an `FPN` instance for feature extraction.
        Args:
          norm_layer: constructor for the normalization layer used after each
            top-down 3x3 convolution (e.g. nn.BatchNorm2d)
          num_filters: the number of filters in each output pyramid level
          pretrained: use ImageNet pre-trained backbone feature extractor
        """

        super().__init__()
        net = MobileNetV2(n_class=1000)

        if pretrained:
            # Load weights from the project directory; the path is relative
            # to the working directory.
            state_dict = torch.load(
                'mobilenetv2.pth.tar')  # add map_location='cpu' if no gpu
            net.load_state_dict(state_dict)
        self.features = net.features

        # Split the MobileNetV2 feature stack into five encoder stages; the
        # cut points (2, 4, 7, 11, 16) presumably align with resolution
        # changes in the backbone -- TODO confirm against MobileNetV2 layout.
        self.enc0 = nn.Sequential(*self.features[0:2])
        self.enc1 = nn.Sequential(*self.features[2:4])
        self.enc2 = nn.Sequential(*self.features[4:7])
        self.enc3 = nn.Sequential(*self.features[7:11])
        self.enc4 = nn.Sequential(*self.features[11:16])

        # Top-down refinement blocks (3x3 conv + norm + ReLU).
        self.td1 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))
        self.td2 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))
        self.td3 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))

        # 1x1 lateral convolutions adapting each stage's channel count
        # (160/64/32/24/16) to the pyramid width.
        self.lateral4 = nn.Conv2d(160, num_filters, kernel_size=1, bias=False)
        self.lateral3 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
        self.lateral2 = nn.Conv2d(32, num_filters, kernel_size=1, bias=False)
        self.lateral1 = nn.Conv2d(24, num_filters, kernel_size=1, bias=False)
        self.lateral0 = nn.Conv2d(16,
                                  num_filters // 2,
                                  kernel_size=1,
                                  bias=False)

        # Freeze the backbone: only the FPN layers above remain trainable.
        for param in self.features.parameters():
            param.requires_grad = False
Пример #10
0
    def __init__(self, num_label=21, width_mult=1.0):
        """SSD-style detection head on a MobileNetV2 backbone.

        Args:
            num_label: number of class scores predicted per default box.
            width_mult: width multiplier applied to the first backbone
                output-channel count.
        """
        super().__init__()

        # Feature Extraction: split MobileNetV2 inside block 14's conv so the
        # first chunk ends partway through that block's expansion layers.
        backbone = MobileNetV2()
        self.feature_extractor0 = nn.Sequential(
            *backbone.feature_extractor[:14],
            *backbone.feature_extractor[14].conv[:3])
        self.feature_extractor1 = nn.Sequential(
            *backbone.feature_extractor[14].conv[3:],
            *backbone.feature_extractor[15:])

        # Extra stride-2 residual blocks producing progressively smaller
        # feature maps beyond the backbone output.
        self.additional_blocks = nn.ModuleList([
            Residual(1280, 512, stride=2, expand_ratio=0.2),
            Residual(512, 256, stride=2, expand_ratio=0.25),
            Residual(256, 256, stride=2, expand_ratio=0.5),
            Residual(256, 64, stride=2, expand_ratio=0.25)
        ])

        # Predict class label and box position
        self.num_label = num_label
        # 6 default boxes per location; each contributes 4 box coordinates
        # (loc) and num_label class scores (conf).
        num_defaults = [6] * 6
        backbone.out_channels[0] = round(backbone.out_channels[0] * width_mult)
        self.loc = []
        self.conf = []
        # Depthwise-separable heads for all but the last (smallest) map.
        for nd, oc in zip(num_defaults[:-1], backbone.out_channels[:-1]):
            self.loc.append(
                build_SSeparableConv2d(oc, nd * 4, kernel=3, padding=1))
            self.conf.append(
                build_SSeparableConv2d(oc, nd * num_label, kernel=3,
                                       padding=1))
        # Plain 1x1 convolutions for the final feature map.
        self.loc.append(
            nn.Conv2d(backbone.out_channels[-1],
                      num_defaults[-1] * 4,
                      kernel_size=1))
        self.conf.append(
            nn.Conv2d(backbone.out_channels[-1],
                      num_defaults[-1] * num_label,
                      kernel_size=1))
        # Wrap in ModuleList so the heads register as sub-modules.
        self.conf = nn.ModuleList(self.conf)
        self.loc = nn.ModuleList(self.loc)
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
Пример #11
0
    def __init__(self, use_nmp=False, N_actors=32, N_moving_actors=32, prev_steps=6, pred_steps=30, n_hid=256,
                 do_prob=0.5, cnn='alex'):
        """Trajectory encoder with optional neural message passing (NMP).

        Args:
            use_nmp: when True, additionally build the message-passing layers.
            N_actors: actor-count capacity (stored only; not used here).
            N_moving_actors: moving-actor capacity (stored only; not used here).
            prev_steps: observed trajectory length in steps.
            pred_steps: predicted trajectory length in steps.
            n_hid: hidden embedding width.
            do_prob: dropout probability for the NMP MLPs.
            cnn: 'alex' selects AlexNet-based sub-encoders; any other value
                selects the MobileNetV2/CNNEncoder pair.
        """
        super(NMPEncoder, self).__init__()
        self.use_nmp = use_nmp
        self.N_actors = N_actors
        self.N_moving_actors = N_moving_actors
        self.prev_steps = prev_steps
        self.pred_steps = pred_steps

        # ------ predict the actors trajctories ------- #
        # share raster image feature extractor
        if cnn == 'alex':
            self.image_encoder = AlexNet()
            self.V_encoder = AlexEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)
            self.H_encoder = AlexEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)
            self.M_encoder = AlexEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)
        else:
            self.image_encoder = MobileNetV2()
            self.V_encoder = CNNEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)
            self.H_encoder = CNNEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)
            self.M_encoder = CNNEncoder(prev_steps, pred_steps, n_hid, self.image_encoder)

        if self.use_nmp:
            # ------ NMP -------- #
            # Feature width depends on the backbone: 4096 for the AlexNet
            # path, 1280 for the MobileNetV2 path.
            if cnn == 'alex':
                self.image_emb = nn.Linear(4096, n_hid)
            else:
                self.image_emb = nn.Linear(1280, n_hid)

            # --- only propagate history trajectory --- #
            self.prev_traj_emb = nn.Linear(prev_steps * 2, n_hid)
            # --- propagate his + baseline prediction --- #
            # self.prev_traj_emb = nn.Linear((prev_steps+pred_steps)*2, n_hid)

            # ----- nmp: node-to-edge and edge-to-node message MLPs ----- #
            self.mlp_n2e = MLP(n_hid * 2, n_hid, n_hid, do_prob)
            self.mlp_e2n = MLP(n_hid * 2, n_hid, n_hid, do_prob)

            self.fusion = nn.Linear(n_hid * 2, n_hid)
            # self.fusion = nn.Linear(n_hid, n_hid)
            self.pred_fc = nn.Linear(n_hid, pred_steps * 2)
Пример #12
0
def test():
    """Evaluate the quantized MobileNetV2 on the bank test set, printing the
    7x7 confusion matrix and overall accuracy."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(400),
        transforms.CenterCrop((400, 400)),
        transforms.ToTensor(),
    ])
    testset = oldBank.Bankset(oldBank.TESTSET_PATH, transform_test)
    testloader = DataLoader(testset, batch_size=6, shuffle=True, num_workers=4)

    model = MobileNetV2(num_classes=7)
    model.load_state_dict(torch.load('best_quantized.pth'))
    model.to(device)
    model.eval()

    running_corrects = 0
    confusion = torch.zeros([7, 7])
    for _, batch in enumerate(testloader, 0):
        inputs = batch['image'].to(device)
        labels = batch['class'].to(device)

        with torch.set_grad_enabled(False):
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            # Accumulate confusion[true][predicted] per sample.
            for sample_idx, pred in enumerate(preds):
                confusion[labels[sample_idx]][pred] += 1
        running_corrects += torch.sum(preds == labels.data)
    print(confusion)
    acc = running_corrects.double() / len(testset)
    print('Test Acc: {:.4f}'.format(acc))
def load_model(args):
    """Build the headless Keras backbone named by ``args.model``.

    With ``args.output_layer == '0'`` the pooled backbone itself is
    returned; otherwise a sub-model truncated at that named layer is built.

    Returns:
        (model, preprocess_mode): ``preprocess_mode`` is 'tf' or 'caffe',
        matching the backbone's training-time preprocessing.

    Raises:
        ValueError: for an unknown ``args.model``.  The original code
        returned 0 here, which broke ``model, mode = load_model(args)``
        callers with a TypeError.
    """
    # model name -> (constructor, preprocessing mode); collapses the two
    # near-identical nine-way if/elif chains of the original.
    builders = {
        'inception': (InceptionV3, 'tf'),
        'xception': (Xception, 'tf'),
        'inceptionresnet': (InceptionResNetV2, 'tf'),
        'mobilenet': (MobileNet, 'tf'),
        'mobilenet2': (MobileNetV2, 'tf'),
        'nasnet': (NASNetLarge, 'tf'),
        'resnet': (ResNet50, 'caffe'),
        'vgg16': (VGG16, 'caffe'),
        'vgg19': (VGG19, 'caffe'),
    }
    if args.model not in builders:
        print ("Model not found")
        raise ValueError('unknown model: {}'.format(args.model))

    constructor, preprocess_mode = builders[args.model]
    model = constructor(include_top=False, weights='imagenet',
                        pooling=args.pooling)
    if args.output_layer != '0':
        # Truncate at the requested intermediate layer.
        # NOTE(review): 'input='/'output=' is the Keras 1 Model signature;
        # newer Keras expects 'inputs='/'outputs=' -- confirm installed version.
        model = Model(input=model.input,
                      output=model.get_layer(args.output_layer).output)

    return model, preprocess_mode
Пример #14
0
def train_net(args):
    """Train a MobileNetV2 regressor with MSE loss on the DeepHNDataset.

    Builds a fresh DataParallel model (or resumes model/optimizer objects
    from ``args.checkpoint``), runs train/valid epochs up to
    ``args.end_epoch``, logs to TensorBoard, and saves a checkpoint each
    epoch with a best-validation-loss flag.
    """
    # Fixed seeds for reproducible runs.
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_loss = float('inf')
    writer = SummaryWriter()
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        model = MobileNetV2()
        model = nn.DataParallel(model)

        optimizer = HNetOptimizer(torch.optim.Adam(model.parameters(), lr=args.lr))

    else:
        # The checkpoint stores whole model/optimizer objects, not
        # state_dicts, so they are restored directly.
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    logger = get_logger()

    # Move to GPU, if available
    model = model.to(device)

    # Loss function
    criterion = nn.MSELoss().to(device)

    # Custom dataloaders
    train_dataset = DeepHNDataset('train')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=num_workers)
    valid_dataset = DeepHNDataset('valid')
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
                                               num_workers=num_workers)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        model.zero_grad()
        # One epoch's training
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch,
                           logger=logger)

        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/learning_rate', optimizer.lr, epoch)
        print('\nCurrent effective learning rate: {}\n'.format(optimizer.lr))

        # One epoch's validation
        valid_loss = valid(valid_loader=valid_loader,
                           model=model,
                           criterion=criterion,
                           logger=logger)

        writer.add_scalar('model/valid_loss', valid_loss, epoch)

        # Check if there was an improvement
        is_best = valid_loss < best_loss
        best_loss = min(valid_loss, best_loss)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer, best_loss, is_best, output_dir)
Пример #15
0
    def build(self, input_shape):
        """Keras ``build`` hook: lazily create the encoder/decoder sub-layers.

        The MobileNetV2 encoder is configured with ELU activations and
        dropout 0.2; the decoder emits ``self._num_classes`` outputs.
        """
        self._encoder = MobileNetV2(dropout_rate=0.2, activation=tf.nn.elu)  # TODO:
        self._decoder = Decoder(num_classes=self._num_classes)

        super().build(input_shape)
Пример #16
0
if __name__ == '__main__':
    # Report FLOPs / memory / parameter statistics for a fixed model zoo.
    model_names = [
        'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'vgg16',
        'MnasNet', 'NasNet-A-Large', 'NasNet-A-Mobile', 'MobileNet-v2'
    ]
    # Use a distinct name so the torchvision `models` module isn't shadowed.
    nets = [
        models.resnet18(),
        models.resnet34(),
        models.resnet50(),
        models.resnet101(),
        models.resnet152(),
        models.vgg16(),
        MnasNet(),
        NASNetALarge(),
        NASNetAMobile(),
        MobileNetV2()
    ]

    for model_name, net in zip(model_names, nets):
        # NasNet-A-Large takes the alternate flop-counting path.
        if 'NasNet-A-Large' in model_name:
            total_flops, total_memory = get_model_parm_flops(net, True)
        else:
            total_flops, total_memory = get_model_parm_flops(net)
        total_param = get_model_parm_nums(net)
        print('%s, %.2f, %.2f, %.2f, %.2f, %.2f' %
              (model_name, total_flops / 1e9, total_memory / 1e6,
               total_param / 1e6, total_flops /
               (total_memory + total_param), total_flops * 256 /
               (total_memory * 256 + total_param)))
Пример #17
0
def load_model(model_file):
    """Restore a MobileNetV2 from a saved state_dict file, kept on CPU."""
    net = MobileNetV2()
    net.load_state_dict(torch.load(model_file))
    net.to('cpu')
    return net
Пример #18
0
import time

import torch

from mobilenet_v2 import MobileNetV2

if __name__ == '__main__':
    # Unwrap the DataParallel model stored in the training checkpoint,
    # re-save it as a bare state_dict, then verify it loads back cleanly.
    ckpt_file = 'BEST_checkpoint.tar'
    print('loading {}...'.format(ckpt_file))
    start = time.time()
    ckpt = torch.load(ckpt_file)
    print('elapsed {} sec'.format(time.time() - start))
    model = ckpt['model'].module

    filename = 'homonet.pt'
    print('saving {}...'.format(filename))
    start = time.time()
    torch.save(model.state_dict(), filename)
    print('elapsed {} sec'.format(time.time() - start))

    print('loading {}...'.format(filename))
    start = time.time()
    model = MobileNetV2()
    model.load_state_dict(torch.load(filename))
    print('elapsed {} sec'.format(time.time() - start))
Пример #19
0
def main():
    """Fine-tune a local MobileNetV2 (initialized from torchvision's
    pretrained weights) as a 2-class classifier on ImageFolder data.

    Requires CUDA (exits immediately otherwise).  Resumes from
    ``<args.save>/checkpoint.pth.tar`` when present and saves a checkpoint
    after every epoch.
    """
    if not torch.cuda.is_available():
        sys.exit(1)
    start_t = time.time()

    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info("args = %s", args)

    model = MobileNetV2()
    # Loading of a pre-trained mobilenet: copy torchvision's pretrained
    # parameters into the local MobileNetV2 definition name-by-name.
    model_pretrained = models.mobilenet_v2(pretrained=True)
    dict_params = dict(model.named_parameters())
    dict_params_pretrained = dict(model_pretrained.named_parameters())
    print('Not pretrained: ', list(dict_params)[:10])
    print('Pretrained: ', list(dict_params_pretrained)[:10])
    for param in dict_params:
        dict_params[param].data.copy_(dict_params_pretrained[param].data)

    num_ftrs = model.classifier[1].in_features
    # Here the size of each output sample is set to 2.
    # Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
    # BUG FIX: the original assigned to the undefined name 'mobilenet'
    # (NameError at runtime); the new head belongs on 'model'.
    model.classifier[1] = nn.Linear(num_ftrs, 2)
    logging.info(model)
    model = nn.DataParallel(model).cuda()

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    # Split out the parameters that receive weight decay.
    all_parameters = model.parameters()
    weight_parameters = []
    for pname, p in model.named_parameters():
        if 'fc' in pname or 'conv1' in pname or 'pwconv' in pname:
            weight_parameters.append(p)
    weight_parameters_id = list(map(id, weight_parameters))
    other_parameters = list(
        filter(lambda p: id(p) not in weight_parameters_id, all_parameters))

    optimizer = torch.optim.SGD(
        [{
            'params': other_parameters
        }, {
            'params': weight_parameters,
            'weight_decay': args.weight_decay
        }],
        args.learning_rate,
        momentum=args.momentum,
    )

    # Linear learning-rate decay over the epoch budget.
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lambda step:
                                                  (1.0 - step / args.epochs),
                                                  last_epoch=-1)
    start_epoch = 0
    best_top1_acc = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(checkpoint_tar)
        start_epoch = checkpoint['epoch']
        best_top1_acc = checkpoint['best_top1_acc']
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(
            checkpoint_tar, checkpoint['epoch']))
    # Fast-forward the scheduler to match the resumed epoch.
    for epoch in range(start_epoch):
        scheduler.step()

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    crop_scale = 0.08
    lighting_param = 0.1
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
        Lighting(lighting_param),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])

    train_dataset = datasets.ImageFolder(traindir, transform=train_transforms)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    epoch = start_epoch
    while epoch < args.epochs:
        train_obj, train_top1_acc, train_top5_acc, epoch = train(
            epoch, train_loader, model, criterion_smooth, optimizer, scheduler)
        valid_obj, valid_top1_acc, valid_top5_acc = validate(
            epoch, val_loader, model, criterion, args)

        is_best = False
        if valid_top1_acc > best_top1_acc:
            best_top1_acc = valid_top1_acc
            is_best = True

        save_checkpoint(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'best_top1_acc': best_top1_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)

        epoch += 1

    # BUG FIX: 3600 seconds per hour; the original divided by 36000,
    # under-reporting training time tenfold.
    training_time = (time.time() - start_t) / 3600
    print('total training time = {} hours'.format(training_time))
Пример #20
0
def get_backbone(name, in_channels):
    """Return the feature-extraction backbone registered under ``name``.

    Raises:
        NotImplementedError: for any name other than 'mobilenet_v2'.
    """
    if name != 'mobilenet_v2':
        raise NotImplementedError
    return MobileNetV2(in_channels=in_channels)
Пример #21
0
def main():
    """Train a MobileNetV2 classifier on ImageFolder data with label
    smoothing, SGD with selective weight decay, and linear LR decay.

    Requires CUDA (exits immediately otherwise).  Resumes from
    ``<args.save>/checkpoint.pth.tar`` when present and saves a checkpoint
    after every epoch, flagging the best top-1 validation accuracy.
    """
    if not torch.cuda.is_available():
        sys.exit(1)
    start_t = time.time()

    cudnn.benchmark = True
    cudnn.enabled=True
    logging.info("args = %s", args)

    # load model
    model = MobileNetV2()
    logging.info(model)
    model = nn.DataParallel(model).cuda()

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Label-smoothed loss drives training; plain CE is used for validation.
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    # split the weight parameter that need weight decay
    all_parameters = model.parameters()
    weight_parameters = []
    for pname, p in model.named_parameters():
        if 'fc' in pname or 'conv1' in pname or 'pwconv' in pname:
            weight_parameters.append(p)
    weight_parameters_id = list(map(id, weight_parameters))
    other_parameters = list(filter(lambda p: id(p) not in weight_parameters_id, all_parameters))

    # define the optimizer
    optimizer = torch.optim.SGD(
        [{'params' : other_parameters},
        {'params' : weight_parameters, 'weight_decay' : args.weight_decay}],
        args.learning_rate,
        momentum=args.momentum,
        )

    # define the learning rate scheduler
    # we use the linear learning rate here
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step : (1.0-step/args.epochs), last_epoch=-1)
    start_epoch = 0
    best_top1_acc= 0

    # load the checkpoint if it exists
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(checkpoint_tar)
        start_epoch = checkpoint['epoch']
        best_top1_acc = checkpoint['best_top1_acc']
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}" .format(checkpoint_tar, checkpoint['epoch']))

    # adjust the learning rate according to the checkpoint
    for epoch in range(start_epoch):
        scheduler.step()

    # load training data
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # data augmentation
    crop_scale = 0.08
    lighting_param = 0.1
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
        Lighting(lighting_param),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])

    train_dataset = datasets.ImageFolder(
        traindir,
        transform=train_transforms)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    # load validation data
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # train the model
    epoch = start_epoch
    while epoch < args.epochs:
        train_obj, train_top1_acc,  train_top5_acc = train(epoch,  train_loader, model, criterion_smooth, optimizer, scheduler)
        valid_obj, valid_top1_acc, valid_top5_acc = validate(epoch, val_loader, model, criterion, args)

        is_best = False
        if valid_top1_acc > best_top1_acc:
            best_top1_acc = valid_top1_acc
            is_best = True

        save_checkpoint({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'best_top1_acc': best_top1_acc,
            'optimizer' : optimizer.state_dict(),
            }, is_best, args.save)

        epoch += 1

    # NOTE(review): 3600 seconds per hour -- dividing by 36000 looks like a
    # typo that under-reports training time tenfold; confirm before fixing.
    training_time = (time.time() - start_t) / 36000
    print('total training time = {} hours'.format(training_time))
Пример #22
0
def detect_hand(frameCount):
    """Rock-paper-scissors game loop against a random robot move.

    Reads frames forever from the global video stream ``vs``, warms up for
    the first ``frameCount`` frames, then runs each frame through a 3-class
    MobileNetV2 gesture classifier and reacts to the prediction: drive a
    servo for the robot's move, update the score, or reset the round.
    Every annotated frame is published to the shared ``outputFrame`` under
    ``lock`` (for a streaming endpoint).

    Args:
        frameCount: number of frames to read before classification starts
            (lets the motion detector's background model settle).
    """
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # class index -> gesture label
    # (la = paper, dam = rock, keo = scissors — inferred from the parallel
    # `naming` list and the GPIO comments below; confirm)
    convert = {0: 'la', 1: 'dam', 2: 'keo'}
    # CPU-mapped weights for the 3-class gesture classifier
    checkpoint = torch.load('MBN_epoch_1_loss_0.10.pth',
                            map_location=torch.device('cpu'))
    # print(checkpoint)
    model = MobileNetV2(num_classes=3)
    model.load_state_dict(checkpoint)
    # print(model)
    model.eval()  # inference only

    # set robot and man score
    robot = 0
    man = 0

    # class index -> GPIO pin driving the servo for that robot move;
    # `naming` is the parallel list of English move names
    mapping = [22, 27, 17]
    naming = ['paper', 'rock', 'scissors']

    # 1 while a round may be played, 0 after the robot has moved and
    # until the player shows "rock" (dam) to reset
    isEnd = 1

    # 2 = scissors = GPIO17
    # 1 = rock = GPIO27
    # 0 = paper = GPIO22
    # 17 keo 27 bua 22 la

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        # robot random
        x = randint(0, 2)  # robot's candidate move for this frame
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:

            image1 = preprocess_image(frame)

            output = model(image1)

            # highest-scoring class is the predicted gesture
            _, predicted = torch.max(output.data, 1)
            print(convert[int(predicted)])

            human = int(predicted)

            # overlay the predicted label on the frame
            cv2.putText(frame, convert[int(predicted)],
                        (10, frame.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)

            # player shows paper while a round is open: robot plays,
            # round is scored and closed
            if human == 0 and isEnd == 1:  # la
                isEnd = 0
                runUpServo(mapping[x])
                if x == 0:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Draw')
                elif x == 1:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Human win')
                    man += 1
                else:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Robot win')
                    robot += 1

            # player shows scissors while a round is open
            elif human == 2 and isEnd == 1:  # keo
                isEnd = 0
                runUpServo(mapping[x])
                if x == 0:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Human win')
                    man += 1
                elif x == 1:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Robot  win')
                    robot += 1
                else:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Draw')

            # player shows rock: lower the servo, print the score, and
            # open a new round.  NOTE(review): rock itself never scores a
            # round — presumably the designated reset gesture; confirm.
            elif human == 1:  # reset game
                runDownServo()
                isEnd = 1
                print('Scoreboard Robot vs Human: %d : %d' % (robot, man))

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Пример #23
0
def train_net(args):
    """Train a face-recognition backbone with an ArcFace margin head.

    Builds (or resumes from ``args.checkpoint``) the backbone selected by
    ``args.network`` plus an ``ArcMarginModel`` head, trains with SGD or
    Adam, evaluates on MegaFace every 5 epochs, rolls back to the best
    checkpoint and calls ``adjust_learning_rate(optimizer, 0.5)`` after
    every 2 epochs without improvement, and stops after 10 epochs without
    improvement.  Metrics are logged to TensorBoard via ``SummaryWriter``.
    """
    # fixed seeds for reproducibility
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = float('-inf')  # first measured accuracy always counts as best
    writer = SummaryWriter()
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        # fresh run: build the backbone requested on the command line
        if args.network == 'r18':
            model = resnet18(args)
        elif args.network == 'r34':
            model = resnet34(args)
        elif args.network == 'r50':
            model = resnet50(args)
        elif args.network == 'r101':
            model = resnet101(args)
        elif args.network == 'r152':
            model = resnet152(args)
        elif args.network == 'mobile':
            from mobilenet_v2 import MobileNetV2
            model = MobileNetV2()
        else:
            raise TypeError('network {} is not supported.'.format(
                args.network))

        # ArcFace margin head; optimized jointly with the backbone
        metric_fc = ArcMarginModel(args)

        if args.optimizer == 'sgd':
            optimizer = torch.optim.SGD([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                        lr=args.lr,
                                        momentum=args.mom,
                                        weight_decay=args.weight_decay)
        else:
            optimizer = torch.optim.Adam([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                         lr=args.lr,
                                         weight_decay=args.weight_decay)

    else:
        # resume: the checkpoint stores whole objects, not state_dicts
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']
        metric_fc = checkpoint['metric_fc']
        optimizer = checkpoint['optimizer']

    # NOTE(review): on resume the loaded model may already be a
    # DataParallel module, in which case this wraps it twice — confirm
    # what save_checkpoint stores.
    model = nn.DataParallel(model)
    metric_fc = nn.DataParallel(metric_fc)

    # Move to GPU, if available
    model = model.to(device)
    metric_fc = metric_fc.to(device)

    # Loss function
    if args.focal_loss:
        criterion = FocalLoss(gamma=args.gamma).to(device)
    else:
        criterion = nn.CrossEntropyLoss().to(device)

    # Custom dataloaders
    train_dataset = ArcFaceDataset('train')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=num_workers)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        # Decay learning rate if there is no improvement for 2 consecutive epochs, and terminate training after 10
        if epochs_since_improvement == 10:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 2 == 0:
            # roll back to the best weights before decaying the LR
            checkpoint = 'BEST_checkpoint.tar'
            checkpoint = torch.load(checkpoint)
            model = checkpoint['model']
            metric_fc = checkpoint['metric_fc']
            optimizer = checkpoint['optimizer']

            adjust_learning_rate(optimizer, 0.5)

        # One epoch's training
        train_loss, train_top1_accs = train(train_loader=train_loader,
                                            model=model,
                                            metric_fc=metric_fc,
                                            criterion=criterion,
                                            optimizer=optimizer,
                                            epoch=epoch)
        lr = optimizer.param_groups[0]['lr']
        print('\nCurrent effective learning rate: {}\n'.format(lr))
        # print('Step num: {}\n'.format(optimizer.step_num))

        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/train_accuracy', train_top1_accs, epoch)
        writer.add_scalar('model/learning_rate', lr, epoch)

        # validation (and hence the improvement counter) only runs every
        # 5th epoch
        if epoch % 5 == 0:
            # One epoch's validation
            megaface_acc = megaface_test(model)
            writer.add_scalar('model/megaface_accuracy', megaface_acc, epoch)

            # Check if there was an improvement
            is_best = megaface_acc > best_acc
            best_acc = max(megaface_acc, best_acc)
            if not is_best:
                epochs_since_improvement += 1
                print("\nEpochs since last improvement: %d\n" %
                      (epochs_since_improvement, ))
            else:
                epochs_since_improvement = 0

            # Save checkpoint
            save_checkpoint(epoch, epochs_since_improvement, model, metric_fc,
                            optimizer, best_acc, is_best)
Пример #24
0
def train_net(args):
    """Train a face-recognition backbone with an ArcFace margin head.

    Variant that evaluates on LFW after every epoch and decays the
    learning rate with a ``StepLR`` schedule (step ``args.lr_step``,
    gamma 0.1) instead of improvement-based decay.  Builds (or resumes
    from ``args.checkpoint``) the backbone selected by ``args.network``
    plus an ``ArcMarginModel`` head and logs metrics to TensorBoard.
    """
    # fixed seeds for reproducibility
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = float('-inf')  # first measured accuracy always counts as best
    writer = SummaryWriter()
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        # fresh run: build the backbone requested on the command line
        if args.network == 'r18':
            model = resnet18(args)
        elif args.network == 'r34':
            model = resnet34(args)
        elif args.network == 'r50':
            model = resnet50(args)
        elif args.network == 'r101':
            model = resnet101(args)
        elif args.network == 'r152':
            model = resnet152(args)
        elif args.network == 'mobile':
            model = MobileNetV2()
        else:
            raise TypeError('network {} is not supported.'.format(
                args.network))

        # print(model)
        # NOTE(review): DataParallel wrapping happens only on this fresh-init
        # path; the resume path below relies on the checkpoint already
        # containing wrapped modules — confirm.
        model = nn.DataParallel(model)
        metric_fc = ArcMarginModel(args)
        metric_fc = nn.DataParallel(metric_fc)

        if args.optimizer == 'sgd':
            optimizer = torch.optim.SGD([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                        lr=args.lr,
                                        momentum=args.mom,
                                        weight_decay=args.weight_decay)
        else:
            optimizer = torch.optim.Adam([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                         lr=args.lr,
                                         weight_decay=args.weight_decay)

    else:
        # resume: the checkpoint stores whole objects, not state_dicts
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']
        metric_fc = checkpoint['metric_fc']
        optimizer = checkpoint['optimizer']

    logger = get_logger()

    # Move to GPU, if available
    model = model.to(device)
    metric_fc = metric_fc.to(device)

    # Loss function
    if args.focal_loss:
        criterion = FocalLoss(gamma=args.gamma).to(device)
    else:
        criterion = nn.CrossEntropyLoss().to(device)

    # Custom dataloaders
    train_dataset = ArcFaceDataset('train')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4)

    # NOTE(review): on resume the scheduler is created fresh (no
    # last_epoch), so the decay schedule restarts — confirm intended.
    scheduler = StepLR(optimizer, step_size=args.lr_step, gamma=0.1)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        # One epoch's training
        train_loss, train_acc = train(train_loader=train_loader,
                                      model=model,
                                      metric_fc=metric_fc,
                                      criterion=criterion,
                                      optimizer=optimizer,
                                      epoch=epoch,
                                      logger=logger)

        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/train_acc', train_acc, epoch)

        # One epoch's validation
        lfw_acc, threshold = lfw_test(model)
        writer.add_scalar('model/valid_acc', lfw_acc, epoch)
        writer.add_scalar('model/valid_thres', threshold, epoch)

        # Check if there was an improvement
        is_best = lfw_acc > best_acc
        best_acc = max(lfw_acc, best_acc)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, metric_fc,
                        optimizer, best_acc, is_best)

        # NOTE(review): passing the epoch to step() is the deprecated form
        # in recent PyTorch releases; plain scheduler.step() is preferred.
        scheduler.step(epoch)
import sys

#sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
from torchvision.models import resnet
import pytorch_to_caffe
#from MobileNetV2 import MobileNetV2
from mobilenet_v2 import MobileNetV2

if __name__ == '__main__':
    # Convert a trained PyTorch MobileNetV2 checkpoint to Caffe format
    # (prototxt + caffemodel) by tracing the network with pytorch_to_caffe.
    name = 'MobileNetV2'
    net= MobileNetV2().eval()  # eval mode so the traced graph is deterministic
    #checkpoint = torch.load("/home/shining/Downloads/mobilenet_v2.pth.tar")

    #net.load_state_dict(checkpoint)
    # NOTE(review): hard-coded absolute checkpoint path — only valid on the
    # original author's machine; parameterize before reuse.
    net.load_state_dict(torch.load('/home/whl/convertor/model.epoch-20.step-152166.pth',map_location='cpu')['state_dict'])
    # dummy input fixing the traced input shape: NCHW 1x3x224x224
    input = torch.ones([1, 3, 224, 224])
    # input=torch.ones([1,3,224,224])
    pytorch_to_caffe.trans_net(net, input, name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
Пример #26
0
    image = val_tfms(pil_image)
    image = image.unsqueeze_(0).cpu()
    # image /= 255.00
    image = F.interpolate(image, size=256)
    return image


if __name__ == "__main__":
    ########################### Magic code line #################################
    vs = VideoStream(src=0).start()

    convert = {0: 'la', 1: 'dam', 2: 'keo'}
    checkpoint = torch.load('MBN_epoch_1_loss_0.10.pth',
                            map_location=torch.device('cpu'))
    # print(checkpoint)
    model = MobileNetV2(num_classes=3)
    model.load_state_dict(checkpoint)
    # print(model)
    model.eval()
    # Your image here
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        time.sleep(200)
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
Пример #27
0
# Benchmark script: build a Keras vision model (ResNet50 or MobileNetV2)
# with XLA enabled and auto mixed precision disabled, then run repeated
# single-image predictions on a constant dummy input.
parser = argparse.ArgumentParser(description = "run tf nvtx model")
# required: without -m no model is built and the script cannot proceed
parser.add_argument("-m", "--model", choices=["resnet", "mobilenet"],
                    required=True)
arg = parser.parse_args()

os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
#os.environ["TF_XLA_FLAGS"] = "--tf_xla_auto_jit=2 --tf_xla_cpu_global_jit"

tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(True) # Enable XLA

if arg.model == 'resnet':
    from resnet import ResNet50
    model = ResNet50(include_top=True, weights=None, input_tensor=None, input_shape=(224, 224, 3),
            pooling=None, classes=1000)

if arg.model == 'mobilenet':
    from mobilenet_v2 import MobileNetV2
    model = MobileNetV2(alpha=1.0, include_top=True, weights=None, input_tensor=None,
            pooling=None, classes=1000, classifier_activation='softmax', input_shape=(224, 224, 3))

model.summary()

# one NHWC image of ones as the constant dummy input
shape=[1,224,224,3]
picture = np.ones(shape, dtype=np.float32)

# prediction loop being benchmarked
nSteps=50
for i in range(0, nSteps):
    # BUG FIX: previously referenced undefined `picture1`, which raised
    # NameError on the first iteration.
    ret = model.predict(picture, batch_size=1)

Пример #28
0
    testset = oldBank.Bankset(oldBank.TESTSET_PATH, transform_test)
    testloader = DataLoader(testset, batch_size=6, shuffle=True, num_workers=4)

    # float_model = load_model(saved_model_dir + float_model_file).to('cpu')

    # print('\n Inverted Residual Block: Before fusion \n\n', float_model.features[1].conv)
    # float_model.eval()

    # # Fuses modules
    # float_model.fuse_model()

    # # Note fusion of Conv+BN+Relu and Conv+Relu
    # print('\n Inverted Residual Block: After fusion\n\n',float_model.features[1].conv)

    model = MobileNetV2(
        num_classes=7
    )  #models.resnet18(pretrained=True, progress=True, quantize=False)
    #model.layer2 = Identity()
    #model.layer4 = Identity()
    #model.layer4[0].conv1 = nn.Conv2d(128, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    #model.layer4[0].downsample[0] = nn.Conv2d(128, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)

    #model.fuse_model()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                milestones=[25, 60, 70, 80],
                                                gamma=0.1)
    #exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
Пример #29
0
	"""corn""",
	"""acorn""",
	"""hip, rose hip, rosehip""",
	"""buckeye, horse chestnut, conker""",
	"""coral fungus""",
	"""agaric""",
	"""gyromitra""",
	"""stinkhorn, carrion fungus""",
	"""earthstar""",
	"""hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa""",
	"""bolete""",
	"""ear, spike, capitulum""",
	"""toilet tissue, toilet paper, bathroom tissue""" )

# Square input resolution the images are cropped/resized to in load_image.
IMG_SIZE = 224
# PyTorch MobileNetV2 instance.
mbv2 = MobileNetV2()
# Caffe MobileNetV2 loaded in TEST (inference) phase from local deploy files.
inet = caffe.Net('./mobilenet_v2_deploy.prototxt', './mobilenet_v2.caffemodel', caffe.TEST)

def load_image(filn):
	img = Image.open(filn)
	cropBox = None
	width, height = img.size
	if width > height:
		tmpv = (width - height) // 2
		cropBox = (tmpv, 0, tmpv + height, height)
	else:
		tmpv = (height - width) // 2
		cropBox = (0, tmpv, width, tmpv + width)
	img = img.crop(box=cropBox).resize((IMG_SIZE, IMG_SIZE), resample=Image.BICUBIC)
	img = np.asarray(img, dtype=np.float32)
	ret = np.empty((3, IMG_SIZE, IMG_SIZE), dtype=np.float32)