Example No. 1
def get_instance_segmentation_model(bone='resnet50', attention=False):
    # Backbones come from a local `models` module; the attention variants
    # have no pretrained weights, so `pretrained` is simply `not attention`.
    if bone == 'mobilenet_v2':
        backbone = models.mobilenet_v2(pretrained=not attention,
                                       att=attention).features
        backbone.out_channels = 1280
    elif bone == 'googlenet':
        backbone = models.googlenet(pretrained=not attention)
        backbone.out_channels = 1024
    elif bone == 'densenet121':
        backbone = models.densenet121(pretrained=not attention,
                                      att=attention).features
        backbone.out_channels = 1024
    elif bone == 'resnet50':
        backbone = models.resnet50(pretrained=not attention, att=attention)
        backbone.out_channels = 2048
    elif bone == 'shufflenet_v2_x1_0':
        backbone = models.shufflenet_v2_x1_0(pretrained=not attention)
        backbone.out_channels = 1024
    elif bone == 'inception_v3':
        backbone = models.inception_v3(
        )  # 'InceptionOutputs' object has no attribute 'values'
        backbone.out_channels = 2048
    elif bone == 'squeezenet1_0':
        backbone = models.squeezenet1_0(pretrained=not attention).features
        backbone.out_channels = 512

    anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512), ),
                                       aspect_ratios=((0.5, 1.0, 2.0), ))
    roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],  # torchvision expects string feature-map names
                                                    output_size=7,
                                                    sampling_ratio=2)
    model = MaskRCNN(backbone,
                     num_classes=2,
                     rpn_anchor_generator=anchor_generator,
                     box_roi_pool=roi_pooler)
    return model
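A minimal smoke test of the factory above (a sketch: it assumes the custom `models` module returns feature extractors compatible with MaskRCNN and that the weights are available locally):

import torch

model = get_instance_segmentation_model(bone='mobilenet_v2', attention=False)
model.eval()
with torch.no_grad():
    # In eval mode MaskRCNN takes a list of 3xHxW tensors and returns one
    # dict per image with 'boxes', 'labels', 'scores', and 'masks'.
    predictions = model([torch.rand(3, 512, 512)])
print(predictions[0]['boxes'].shape)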
Example No. 2
def visual(**kwargs):
    opt.parse(kwargs)
    model = models.densenet121()
    model.classifier = torch.nn.Linear(1024, 2)
    checkpoint = torch.load(
        '/home/hdc/yfq/CAG/checkpoints/Densenet1210219_19:09:46.pth')
    model_dict = model.state_dict()
    state_dict = {
        k: v
        for k, v in checkpoint.items()
        if k in model_dict and "classifier" not in k
    }
    model.load_state_dict(state_dict, strict=False)

    fc_weight = checkpoint['module.classifier.weight']
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
    transforms0 = T.Compose([T.RandomResizedCrop(224)])
    transforms1 = T.Compose([T.ToTensor(), normalize])
    # data
    img_path = '/home/hdc/yfq/CAG/data/visual1/3.jpg'
    data = Image.open(img_path)
    data0 = transforms0(data)
    data1 = transforms1(data0)
    data1 = data1.unsqueeze(0)
    model.eval()
    score, feature = model(data1)  # assumes a densenet variant whose forward returns (logits, feature maps)
    CAMs = returnCAM(feature, fc_weight)
    _, _, height, width = data1.size()
    heatmap = cv2.applyColorMap(cv2.resize(CAMs[1], (width, height)),
                                cv2.COLORMAP_JET)
    result = heatmap * 0.3 + np.array(data0) * 0.5
    cv2.imwrite('/home/hdc/yfq/CAG/data/visual1/3.CAM0.bmp', result)
    return 1
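`returnCAM` is not defined in this snippet; a common implementation (after Zhou et al., 2016) projects the final feature maps onto the classifier weights. A hypothetical sketch:

import numpy as np
import cv2

def returnCAM(feature_conv, fc_weight, size_upsample=(256, 256)):
    # feature_conv: (1, C, H, W) feature maps; fc_weight: (num_classes, C)
    bz, nc, h, w = feature_conv.shape
    flat = feature_conv.detach().numpy().reshape(nc, h * w)
    cams = []
    for class_weights in fc_weight.detach().numpy():
        cam = (class_weights @ flat).reshape(h, w)  # weighted sum over channels
        cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-12)  # scale to [0, 1]
        cams.append(cv2.resize(np.uint8(255 * cam), size_upsample))
    return cams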
Example No. 3
def Densenet121(num_classes, test=False):
    model = densenet121()
    if not test:
        if LOCAL_PRETRAINED['densenet121'] is None:
            state_dict = load_state_dict_from_url(model_urls['densenet121'],
                                                  progress=True)
        else:
            state_dict = torch.load(LOCAL_PRETRAINED['densenet121'])

        from collections import OrderedDict
        new_state_dict = OrderedDict()

        for k, v in state_dict.items():
            # The checkpoint keys differ slightly from the keys in this
            # network definition, so they must be remapped; printing both
            # sets of keys side by side shows the difference.
            # torchvision's own loader renames the keys with a regular
            # expression; the mismatch here is simple enough to fix by
            # splitting the key directly.
            if k.split('.')[0] == 'features' and len(k.split('.')) > 4:
                parts = k.split('.')
                k = '.'.join(parts[:3]) + '.' + parts[-3] + parts[-2] + '.' + parts[-1]
            new_state_dict[k] = v
        model.load_state_dict(new_state_dict)
    fc_features = model.classifier.in_features
    model.classifier = nn.Linear(fc_features, num_classes)
    return model
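For reference, torchvision's own DenseNet loader performs the same rename with a regular expression (paraphrased from torchvision.models.densenet; details vary by version):

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.'
    r'((?:[12])\.(?:weight|bias|running_mean|running_var))$')
for key in list(state_dict.keys()):
    res = pattern.match(key)
    if res:
        new_key = res.group(1) + res.group(2)  # e.g. 'norm.1.weight' -> 'norm1.weight'
        state_dict[new_key] = state_dict.pop(key)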
Example No. 4
def create_model(name, num_classes):
    if name == 'resnet34':
        model = models.resnet34(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        # use the in-place initializers; the non-underscore variants are deprecated
        nn.init.xavier_uniform_(model.fc.weight)
        nn.init.constant_(model.fc.bias, 0)
    elif name == 'resnet152':
        model = models.resnet152(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        nn.init.xavier_uniform_(model.fc.weight)
        nn.init.constant_(model.fc.bias, 0)
    elif name == 'densenet121':
        model = models.densenet121(True)
        model.classifier = nn.Linear(model.classifier.in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier.weight)
        nn.init.constant_(model.classifier.bias, 0)
    elif name == 'vgg11_bn':
        # pass num_classes by keyword; the second positional argument is not num_classes
        model = models.vgg11_bn(pretrained=False, num_classes=num_classes)
    elif name == 'vgg19_bn':
        model = models.vgg19_bn(True)
        model.classifier._modules['6'] = nn.Linear(model.classifier._modules['6'].in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier._modules['6'].weight)
        nn.init.constant_(model.classifier._modules['6'].bias, 0)
    elif name == 'alexnet':
        model = models.alexnet(True)
        model.classifier._modules['6'] = nn.Linear(model.classifier._modules['6'].in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier._modules['6'].weight)
        nn.init.constant_(model.classifier._modules['6'].bias, 0)
    else:
        model = Net(num_classes)

    return model
Example No. 5
def create_model(name, num_classes):
    if name == 'resnet34':
        model = models.resnet34(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        nn.init.xavier_uniform_(model.fc.weight)
        nn.init.constant_(model.fc.bias, 0)
    elif name == 'resnet50':
        model = models.resnet50(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        nn.init.xavier_uniform_(model.fc.weight)
        nn.init.constant_(model.fc.bias, 0)
    elif name == 'resnet152':
        model = models.resnet152(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        nn.init.xavier_uniform_(model.fc.weight)
        nn.init.constant_(model.fc.bias, 0)
    elif name == 'seresnet50':
        model = models.se_resnet50()
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      num_classes,
                                      bias=True)
    elif name == 'seresnet152':
        model = models.se_resnet152()
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      num_classes,
                                      bias=True)
    elif name == 'dpn131':
        model = models.dpn131()
        model.classifier = nn.Conv2d(2688,
                                     num_classes,
                                     kernel_size=1,
                                     bias=True)
    elif name == 'densenet121':
        model = models.densenet121(True)
        model.classifier = nn.Linear(model.classifier.in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier.weight)
        nn.init.constant_(model.classifier.bias, 0)
    elif name == 'vgg11_bn':
        model = models.vgg11_bn(pretrained=False, num_classes=num_classes)
    elif name == 'vgg19_bn':
        model = models.vgg19_bn(True)
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier._modules['6'].weight)
        nn.init.constant_(model.classifier._modules['6'].bias, 0)
    elif name == 'alexnet':
        model = models.alexnet(True)
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        nn.init.xavier_uniform_(model.classifier._modules['6'].weight)
        nn.init.constant_(model.classifier._modules['6'].bias, 0)
    else:
        model = Net(num_classes)

    return model
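A quick sanity check of the factory (a sketch; the ImageNet weights are downloaded on first use, and `Net` is whatever fallback network this repo defines):

model = create_model('densenet121', num_classes=5)
print(model.classifier)  # Linear(in_features=1024, out_features=5, bias=True)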
Example No. 6
torch.manual_seed(args.seed)

if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

print('load data: ', args.dataset)
train_loader, test_loader = data_loader.getTargetDataSet(
    args.dataset, args.batch_size, args.imageSize, args.dataroot)

print('Load model')
if args.model == 'resnet':
    model = models.resnet18(pretrained=args.pretrained)
elif args.model == 'densenet':
    model = models.densenet121(pretrained=args.pretrained)
else:
    raise Exception('invalid model selected')

if args.cuda:
    model.cuda()

print('Setup optimizer')
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))


def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
Example No. 7
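The assertions below assume `inputs` and `num_classes` are defined earlier in the test; a plausible fixture (hypothetical values, sized for the 224x224 models):

import torch

num_classes = 10
inputs = torch.rand(2, 3, 224, 224)  # batch of two RGB images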
test = models.resnet34(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.resnet50(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.resnet101(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.resnet152(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.alexnet(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.densenet121(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.densenet169(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')
test = models.densenet201(num_classes=num_classes, pretrained='imagenet')
assert test(inputs).size()[1] == num_classes
print('ok')

test = models.inceptionv3(num_classes=num_classes, pretrained='imagenet')
assert test(torch.rand([2, 3, 299, 299]))[0].size()[1] == num_classes
print('ok')
Example No. 8

def train(working_dir, grid_size, learning_rate, batch_size, num_walks,
          model_type, fn):
    train_props, val_props, test_props = get_props(working_dir,
                                                   dtype=np.float32)
    means_stds = np.loadtxt(working_dir + "/means_stds.csv",
                            dtype=np.float32,
                            delimiter=',')

    # filter out redundant qm8 properties
    if train_props.shape[1] == 16:
        filtered_labels = list(range(0, 8)) + list(range(12, 16))
        train_props = train_props[:, filtered_labels]
        val_props = val_props[:, filtered_labels]
        test_props = test_props[:, filtered_labels]

        means_stds = means_stds[:, filtered_labels]
    if model_type == "resnet18":
        model = ResNet(BasicBlock, [2, 2, 2, 2],
                       grid_size,
                       "regression",
                       feat_nums,
                       e_sizes,
                       num_classes=train_props.shape[1])
    elif model_type == "resnet34":
        model = ResNet(BasicBlock, [3, 4, 6, 3],
                       grid_size,
                       "regression",
                       feat_nums,
                       e_sizes,
                       num_classes=train_props.shape[1])
    elif model_type == "resnet50":
        model = ResNet(Bottleneck, [3, 4, 6, 3],
                       grid_size,
                       "regression",
                       feat_nums,
                       e_sizes,
                       num_classes=train_props.shape[1])
    elif model_type == "densenet121":
        model = densenet121(grid_size,
                            "regression",
                            feat_nums,
                            e_sizes,
                            num_classes=train_props.shape[1])
    elif model_type == "densenet161":
        model = densenet161(grid_size,
                            "regression",
                            feat_nums,
                            e_sizes,
                            num_classes=train_props.shape[1])
    elif model_type == "densenet169":
        model = densenet169(grid_size,
                            "regression",
                            feat_nums,
                            e_sizes,
                            num_classes=train_props.shape[1])
    elif model_type == "densenet201":
        model = densenet201(grid_size,
                            "regression",
                            feat_nums,
                            e_sizes,
                            num_classes=train_props.shape[1])
    else:
        print("specify a valid model")
        return
    model.float()
    model.cuda()
    loss_function_train = nn.MSELoss(reduction='none')
    loss_function_val = nn.L1Loss(reduction='none')
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # if model_type[0] == "r":
    # 	batch_size = 128
    # 	optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
    # 					   momentum=0.9, weight_decay=5e-4, nesterov=True)
    # elif model_type[0] == "d":
    # 	batch_size = 512
    # 	optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
    # 					   momentum=0.9, weight_decay=1e-4, nesterov=True)
    # else:
    # 	print("specify a valid model")
    # 	return

    stds = means_stds[1, :]
    tl_list = []
    vl_list = []

    log_file = open(fn + ".txt", "w")
    log_file.write("start\n")
    log_file.flush()

    for file_num in range(num_loads):
        if file_num % 20 == 0:
            model_file = open("../../scratch/" + fn + ".pkl", "wb")
            pickle.dump(model, model_file)
            model_file.close()

        log_file.write("load: " + str(file_num))
        print("load: " + str(file_num))
        # Get new random walks
        if file_num == 0:
            t = time.time()
            train_loader, val_loader, test_loader = get_loaders(
                working_dir, file_num, grid_size, batch_size, train_props,
                val_props=val_props, test_props=test_props)
            print("load time")
            print(time.time() - t)
        else:
            file_num = random.randint(0, num_walks - 1)
            t = time.time()
            train_loader, _, _ = get_loaders(working_dir, file_num, grid_size,
                                             batch_size, train_props)
            print("load time")
            print(time.time() - t)
        # Train on set of random walks, can do multiple epochs if desired
        for epoch in range(epochs_per_load):
            model.train()
            t = time.time()
            train_loss_list = []
            train_mae_loss_list = []
            for i, (walks_int, walks_float, props) in enumerate(train_loader):
                walks_int = walks_int.cuda()
                walks_int = walks_int.long()
                walks_float = walks_float.cuda()
                walks_float = walks_float.float()
                props = props.cuda()
                outputs = model(walks_int, walks_float)
                # Individual losses for each item
                loss_mae = torch.mean(loss_function_val(props, outputs), 0)
                train_mae_loss_list.append(loss_mae.cpu().detach().numpy())
                loss = torch.mean(loss_function_train(props, outputs), 0)
                train_loss_list.append(loss.cpu().detach().numpy())
                # Loss converted to single value for backpropagation
                loss = torch.sum(loss)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            model.eval()
            val_loss_list = []
            with torch.no_grad():
                for i, (walks_int, walks_float,
                        props) in enumerate(val_loader):
                    walks_int = walks_int.cuda()
                    walks_int = walks_int.long()
                    walks_float = walks_float.cuda()
                    walks_float = walks_float.float()
                    props = props.cuda()
                    outputs = model(walks_int, walks_float)
                    # Individual losses for each item
                    loss = loss_function_val(props, outputs)
                    val_loss_list.append(loss.cpu().detach().numpy())
            # ith row of this array is the losses for each label in batch i
            train_loss_arr = np.array(train_loss_list)
            train_mae_arr = np.array(train_mae_loss_list)
            log_file.write("training mse loss\n")
            log_file.write(str(np.mean(train_loss_arr)) + "\n")
            log_file.write("training mae loss\n")
            log_file.write(str(np.mean(train_mae_arr)) + "\n")
            print("training mse loss")
            print(str(np.mean(train_loss_arr)))
            print("training mae loss")
            print(str(np.mean(train_mae_arr)))
            val_loss_arr = np.concatenate(val_loss_list, 0)
            val_loss = np.mean(val_loss_arr, 0)
            log_file.write("val loss\n")
            log_file.write(str(np.mean(val_loss_arr)) + "\n")
            print("val loss")
            print(str(np.mean(val_loss_arr)))
            # Unnormalized loss is for comparison to papers
            tnl = np.mean(train_mae_arr, 0)
            log_file.write("train normalized losses\n")
            log_file.write(" ".join(list(map(str, tnl))) + "\n")
            print("train normalized losses")
            print(" ".join(list(map(str, tnl))))
            log_file.write("val normalized losses\n")
            log_file.write(" ".join(list(map(str, val_loss))) + "\n")
            print("val normalized losses")
            print(" ".join(list(map(str, val_loss))))
            tunl = stds * tnl
            log_file.write("train unnormalized losses\n")
            log_file.write(" ".join(list(map(str, tunl))) + "\n")
            print("train unnormalized losses")
            print(" ".join(list(map(str, tunl))))
            vunl = stds * val_loss
            log_file.write("val unnormalized losses\n")
            log_file.write(" ".join(list(map(str, vunl))) + "\n")
            log_file.write("\n")
            print("val unnormalized losses")
            print(" ".join(list(map(str, vunl))))
            print("\n")
            print("time")
            print(time.time() - t)
        file_num += 1
        log_file.flush()
    log_file.close()
    return model
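A hypothetical invocation of `train` (paths and hyperparameters below are placeholders; `num_loads`, `epochs_per_load`, `feat_nums`, and `e_sizes` must already exist at module level):

model = train(working_dir='data/qm8', grid_size=32, learning_rate=1e-3,
              batch_size=64, num_walks=10, model_type='densenet121',
              fn='densenet121_run1')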
Example No. 9

def GetLabels(labelPath):  # hypothetical name; the original def line is missing from this snippet
    csv = pd.read_csv(labelPath)
    # Series.as_matrix was removed in pandas 1.0; use to_numpy() instead
    level = csv['level'].to_numpy()
    files = csv['image'].to_numpy()
    return dict(zip(files, level))

def GetPreprocess():
    return transforms.Compose([
        transforms.Resize((width, height), interpolation=2),  # 2 == PIL bilinear; newer torchvision prefers InterpolationMode.BILINEAR
        transforms.ToTensor(),
    ])
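Hypothetical usage of the preprocessing pipeline (`width` and `height` are globals defined elsewhere in this script):

preprocess = GetPreprocess()
x = preprocess(Image.open('example.jpg').convert('RGB')).unsqueeze(0)  # (1, 3, H, W)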


if __name__ == "__main__":
    import sys

    model = densenet121(pretrained=False)
    # fc_features = model.fc.in_features 138448
    model.classifier = nn.Linear(model.classifier.in_features, 2, bias=True)
    if torch.cuda.is_available():
        model.cuda()

    checkpointpath = '../checkpoint/'
    if os.path.isfile(os.path.join(checkpointpath, resume)):
        print("loading checkpoint '{}'".format(resume))
        checkpoint = torch.load(os.path.join(checkpointpath, resume))
        val_acc = checkpoint['acc']
        print('model accuracy is ',  val_acc)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        print("no checkpoint found at '{}'".format(os.path.join(checkpointpath, resume)))
Example No. 10

args = parser.parse_args()
print(args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("Random Seed: ", args.seed)
torch.manual_seed(args.seed)

if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

print('Load model')
if args.model == 'resnet':
    model = models.resnet18(num_classes=args.num_classes)
elif args.model == 'densenet':
    model = models.densenet121(num_classes=args.num_classes)
else:
    raise Exception('invalid model selected')
model.load_state_dict(torch.load(args.pre_trained_net))

print('load target data: ', args.dataset)
_, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot)

print('load non target data: ', args.out_dataset)
nt_test_loader = data_loader.getNonTargetDataSet(args.out_dataset, args.batch_size, args.imageSize, args.dataroot)

if args.cuda:
    model.cuda()

def generate_target():
    model.eval()
Example No. 11
def main():
    # set the path to pre-trained model and output
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)

    out_dist_list = [
        'skin_cli', 'skin_derm', 'corrupted', 'corrupted_70', 'imgnet', 'nct',
        'final_test'
    ]

    # load networks
    if args.net_type == 'densenet_121':
        model = densenet_121.Net(models.densenet121(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/densenet-121/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
    elif args.net_type == 'mobilenet':
        model = mobilenet.Net(models.mobilenet_v2(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/mobilenet/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'resnet_50':
        model = resnet_50.Net(models.resnet50(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/resnet-50/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'vgg_16':
        model = vgg_16.Net(models.vgg16_bn(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/vgg-16/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    else:
        raise Exception(f"There is no net_type={args.net_type} available.")

    in_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    model.eval()
    temp_x = torch.rand(2, 3, 224, 224).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    for count, out in enumerate(temp_list):
        feature_list[count] = out.size(1)

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print('get Mahalanobis scores')
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]

    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
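Downstream scripts typically reload these files and split features from labels; a sketch, assuming `merge_and_generate_labels` appends the in/out label as the final column (which the concatenation above suggests):

data = np.load(file_name)
X, y = data[:, :-1], data[:, -1]  # per-layer Mahalanobis scores, in/out label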
Example No. 12
"""
Simple Flask server to return predictions from trained densenet model
"""

from flask import Flask, request, redirect, flash
import torch
from torchvision.transforms.functional import to_tensor
from PIL import Image

from models import densenet121

app = Flask(__name__)
cp = torch.load('models/cv/checkpoint/best_model.pth.tar')
net = densenet121()
net.load_state_dict(cp['net'])
net.eval()


def classify_image(x):
    """
    :param x: image
    :return: class prediction from densenet model
    """
    outputs = net(x)
    _, predicted = outputs.max(1)
    return predicted
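A quick local test of `classify_image` (hypothetical image path; remember to add a batch dimension):

img = Image.open('example.jpg').convert('RGB')
x = to_tensor(img).unsqueeze(0)  # (1, 3, H, W)
print(classify_image(x).item())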


@app.route('/predict', methods=['POST'])
def predict():
    """