コード例 #1
0
    def __init__(self,
                 layers=50,
                 bins=(1, 2, 3, 6),
                 classes=2,
                 dropout=0.1,
                 zoom_factor=8,
                 use_ppm=True,
                 is_training=True,
                 criterion=nn.BCELoss()
                 ):  #criterion=nn.CrossEntropyLoss(ignore_index=255)):
        """Build PSPNet: a dilated ResNet backbone + pyramid pooling + classifier.

        Args:
            layers: backbone depth; one of 50 / 101 / 152.
            bins: pyramid pooling bin sizes; 2048 must divide evenly by len(bins).
            classes: number of output classes (must be > 1).
            dropout: dropout probability before the final 1x1 classifier conv.
            zoom_factor: output upsampling factor; one of 1 / 2 / 4 / 8.
            use_ppm: whether to append the pyramid pooling module.
            is_training: whether to build the auxiliary supervision head.
            criterion: training loss (note: the default module instance is
                created once at def-time and shared by all default-constructed
                PSPNets; harmless for a stateless loss, but worth knowing).
        """
        super(PSPNet, self).__init__()
        assert layers in [50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]

        self.criterion = criterion
        self.classes = classes
        self.use_ppm = use_ppm
        self.zoom_factor = zoom_factor
        self.is_training = is_training

        if layers == 50:
            resnet = ResNet.resnet50()
        elif layers == 101:
            resnet = ResNet.resnet101()
        else:  # 152 (guaranteed by the assert above)
            resnet = ResNet.resnet152()

        # Deep-stem variant: three conv/bn/relu stages before the max-pool.
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.conv2, resnet.bn2, resnet.relu,
                                    resnet.conv3, resnet.bn3, resnet.relu,
                                    resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
            resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        # Convert layer3 to stride-1 dilated convolutions so spatial
        # resolution is preserved for dense prediction.
        # NOTE(review): layer4 is left untouched, unlike the reference PSPNet
        # which also dilates it (rate 4) — confirm this is intentional.
        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 2048
        if self.use_ppm:
            self.ppm = PyramidPoolingModule(fea_dim, int(fea_dim / len(bins)),
                                            bins)
            fea_dim *= 2  # PPM concatenates pooled features onto its input
        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(512, self.classes, kernel_size=1))
        # BUG FIX: the original tested `self.training` — the nn.Module flag,
        # which is always True right after construction — so the
        # `is_training` argument was silently ignored and the aux head was
        # always built. Use the stored constructor argument instead.
        if self.is_training:
            # Auxiliary head on the 1024-channel layer3 features.
            self.aux = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(256), nn.ReLU(inplace=True),
                nn.Dropout2d(p=dropout),
                nn.Conv2d(256, self.classes, kernel_size=1))
コード例 #2
0
ファイル: Main.py プロジェクト: grayinfinity/Connect4Zero
def load_or_create_neural_net():
    """Return the best ResNet-18 player, restoring saved weights if present.

    Loads ``./best_model_resnet.pth`` into a fresh ResNet-18 when the
    checkpoint exists; otherwise starts from randomly initialized weights.
    The returned model is always switched to eval mode.
    """
    file_path = './best_model_resnet.pth'
    # The network is built unconditionally — only the weights are optional —
    # so construction is hoisted out of both branches (the original
    # duplicated the ResNet.resnet18() call).
    best_player_so_far = ResNet.resnet18()
    if os.path.exists(file_path):
        print('loading already trained model')
        best_player_so_far.load_state_dict(torch.load(file_path))
    else:
        print('Trained model doesnt exist. Starting from scratch.')

    best_player_so_far.eval()
    return best_player_so_far
コード例 #3
0
def get_resnet_model(resnet_type=152):
    """
    A function that returns the required pre-trained resnet model
    :param resnet_type: the resnet type (18, 50, 101 or 152; any other value
        falls through to 152)
    :return: the pre-trained model
    """
    if resnet_type == 18:
        return ResNet.resnet18(pretrained=True, progress=True)
    elif resnet_type == 50:
        # NOTE(review): 50 maps to wide_resnet50_2, not plain resnet50 —
        # confirm the wide variant is the intended model here.
        return ResNet.wide_resnet50_2(pretrained=True, progress=True)
    elif resnet_type == 101:
        return ResNet.resnet101(pretrained=True, progress=True)
    else:  #152
        return ResNet.resnet152(pretrained=True, progress=True)
コード例 #4
0
 def __init__(self, segment_classes, level_classes, img_scale):
     """Multi-task net: a shared ResNet-18 encoder feeding two deconv decoders.

     deconv1 produces a `segment_classes`-channel segmentation map and
     deconv2 a single-channel map; their concatenation (segment_classes + 1
     channels) is fused by a 3x3 conv into `level_classes` channels.

     Args:
         segment_classes: number of segmentation output channels.
         level_classes: number of channels of the fused level output.
         img_scale: initial scale passed to both deconvolution decoders.
     """
     super(MDP_Net, self).__init__()
     resnet = ResNet.resnet18()
     self.segment_classes = segment_classes
     self.level_classes = level_classes
     # for param in resnet.parameters():
     #     param.requires_grad = False
     # Encoder stem and the four ResNet stages, reused as-is.
     self.layer0 = torch.nn.Sequential(resnet.conv1, resnet.bn1,
                                       resnet.relu, resnet.maxpool)
     self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
     # Channel width of the deepest encoder features (512 for resnet18).
     plane = resnet.fc.in_features
     self.deconv1 = resnet_deconv(inplanes=plane,
                                  layers=[2, 2, 2, 2],
                                  out_classes=segment_classes,
                                  init_scale=img_scale)
     self.deconv2 = resnet_deconv(inplanes=plane,
                                  layers=[2, 2, 2, 2],
                                  out_classes=1,
                                  init_scale=img_scale)
     # Fuses the two decoder outputs (segment_classes + 1 channels) into the
     # level prediction.
     self.conv = torch.nn.Conv2d(in_channels=segment_classes + 1,
                                 kernel_size=3,
                                 out_channels=level_classes,
                                 padding=1)
     # NOTE(review): p=0.8 is a very aggressive dropout rate — confirm it is
     # intentional. ("droutput" is a typo kept for interface compatibility.)
     self.droutput1 = torch.nn.Dropout(p=0.8)
     self.droutput2 = torch.nn.Dropout(p=0.8)
     # self.sigmoid1 = torch.nn.Sigmoid()
     self.sigmoid2 = torch.nn.Sigmoid()
コード例 #5
0
def RetNet_Vision_Simple(inputs, keep_prob, seq_len, scope=None, reuse=None):
    """Run the ResNet vision tower over each time step and stack the results.

    For step t the tower sees the frame window
    inputs[:, t+1 : LEFT_CONTEXT+t+1] and emits a VISION_FEATURE_SIZE-dim
    feature vector; the per-step features are stacked along axis 1.
    """
    with tf.variable_scope(scope, 'Vision', [inputs], reuse=reuse):
        step_features = [
            ResNet.inference(inputs[:, t + 1:LEFT_CONTEXT + t + 1, :, :, :],
                             VISION_FEATURE_SIZE, keep_prob)
            for t in range(seq_len)
        ]
        return tf.stack(step_features, axis=1)
コード例 #6
0
    def __init__(self, pretrained=True, **kwargs):
        """U-Net with a ResNet-34 encoder.

        The four encoder stages are the resnet34 layers (channel widths in
        the trailing comments); `center` downsamples once more, five Decoder
        blocks upsample back with skip connections, and `logit` maps the
        final 32 channels to a single-channel logit map.

        Args:
            pretrained: load pretrained weights into the backbone.
            **kwargs: forwarded to the parent class.
        """
        super(UNetResNet34, self).__init__(**kwargs)
        self.resnet = ResNet.resnet34(pretrained=pretrained)

        # Encoder stem (conv + bn + relu); note the backbone max-pool is not
        # included here.
        self.conv1 = nn.Sequential(
            self.resnet.conv1,
            self.resnet.bn1,
            self.resnet.relu,
        )  # 64

        self.encoder1 = self.resnet.layer1  # 64
        self.encoder2 = self.resnet.layer2  # 128
        self.encoder3 = self.resnet.layer3  # 256
        self.encoder4 = self.resnet.layer4  # 512

        # Bottleneck: one extra downsample then two conv-bn-relu blocks.
        self.center = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBn2d(512, 1024, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            ConvBn2d(1024, 1024, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

        # Decoder in_channels = skip channels + upsampled channels.
        self.decoder5 = Decoder(512 + 512, 512, 512, convT_channels=1024)
        self.decoder4 = Decoder(256 + 256, 256, 256, convT_channels=512)
        self.decoder3 = Decoder(128 + 128, 128, 128, convT_channels=256)
        self.decoder2 = Decoder(64 + 64, 64, 64, convT_channels=128)
        self.decoder1 = Decoder(32 + 64, 64, 32, convT_channels=64)

        # Final prediction head: 32 -> 32 -> 1 channel.
        self.logit = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 1, kernel_size=1, padding=0),
        )
コード例 #7
0
    def set_data(self):
        """Load MNIST, create the input placeholders, and build the ResNet graph.

        Sets self.initialized to True only when the whole pipeline (data,
        placeholders, network) was built successfully; if the ResNet build
        fails, returns early with self.initialized left False.
        """
        self.initialized = False

        mnist = input_data.read_data_sets("MNIST_data", reshape=False)
        self.x_train, self.y_train = mnist.train.images, mnist.train.labels
        self.x_validation, self.y_validation = mnist.validation.images, mnist.validation.labels
        self.x_test, self.y_test = mnist.test.images, mnist.test.labels

        assert len(self.x_train) == len(self.y_train)
        assert len(self.x_validation) == len(self.y_validation)
        assert len(self.x_test) == len(self.y_test)

        # Placeholder shape [batch(None), H, W, C], taken from a sample image.
        # NOTE(review): assumes self.image_batch is an empty list before this
        # call — a second call would keep appending; confirm callers reset it.
        self.image_batch.append(None)
        self.image_batch += self.x_train[0].shape

        with tf.variable_scope(self.variable_scope_base):
            self.x_tensor = tf.placeholder(tf.float32, self.image_batch)
            self.y_tensor = tf.placeholder(tf.int32)
            # One-hot labels; depth=10 for the ten MNIST digit classes.
            self.y_one_hot_tensor = tf.one_hot(indices=tf.cast(
                self.y_tensor, tf.int32),
                                               depth=10)

        resnet = ResNet.ResNet(self.resnet_size)
        if resnet.build_resnet(self.x_tensor):
            self.resnet = resnet.get_resnet()
        else:
            return  # build failed: leave self.initialized False

        self.initialized = True
コード例 #8
0
    def set_model(self, N = 3, num_features = 2, num_classes = 2, func_f = torch.tanh, func_c =F.softmax, weights = None, bias = None, gpu=False, choice = None, gamma = 0.01):
        """
        allows the user to set the model; choices default to resnet
        'a' - antisymmetric resnet
        'v' - verlet integration
        'l' - leapfrog integration
        anything else (including None with no prior choice) - plain resnet
        """
        # BUG FIX: the original stored `self.choice = choice` *before* the
        # fallback `if choice == None: choice = self.choice`, which made the
        # fallback a no-op — a previously configured choice could never be
        # reused. Fall back first, then store. getattr() keeps the original
        # first-call behavior when no choice was ever set.
        if choice is None:
            choice = getattr(self, 'choice', None)
        self.choice = choice

        #choose resnet
        if choice == 'a':
            print("a")
            self.model = anti.AntiSymResNet(self.device,N, num_features, num_classes, func_f, func_c, weights, bias, gamma, gpu)

        elif choice == 'v':
            print("v")
            self.model = ver.Verlet(self.device,N, num_features, num_classes, func_f, func_c, weights, bias,  gpu)

        elif choice == 'l':
            print("l")
            self.model = lp.Leapfrog(self.device,N, num_features, num_classes, func_f, func_c, weights, bias, gpu)

        else:
            print("r")
            self.model = res.ResNet(self.device,N, num_features, num_classes, func_f, func_c, weights, bias, gpu)

        #set parameters to gpu
        if gpu == True:
            self.model.to(self.device)
コード例 #9
0
ファイル: go.py プロジェクト: 18463101558/hot
def Predict():
    """Restore the trained ResNet and predict a probability for every image.

    Loads all images into memory, rebuilds the graph, restores the
    checkpoint, then runs the model one image at a time; collects
    (image name, probability) records, writes them via analysis(), and
    returns the record list.
    """
    NameList, LabelList = get_img_list()
    # Load every image into memory up front.
    ImageData = load_all_image(img_path, NameList, HEIGHT, WIDTH, CHANNELS)
    num_train_image = len(NameList)  # total number of images
    sess = tf.Session()
    images = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, CHANNELS])
    labels = tf.placeholder(tf.float32, shape=[None, 1])
    # Build the network graph.
    resnet_model = resnet.ResNet(ResNet_npy_path=model_path)
    resnet_model.build(images, 1)

    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())  # initialize all variables
    saver.restore(sess, model_path)
    print("begin predicting!")

    record = []
    index = 0
    # BUG FIX: everything from here to the end of the function was indented
    # one extra level in the original — a syntax error, since there was no
    # enclosing block for it to belong to.
    minibatches = random_mini_batches(num_train_image, 1, random=False)
    for minibatch in minibatches:
        (minibatch_X, minibatch_Y) = get_minibatch(minibatch, LabelList,
                                                   HEIGHT, WIDTH, CHANNELS, 1,
                                                   ImageData)
        resnet_model.set_is_training(False)
        recordprob = sess.run(resnet_model.prob,
                              feed_dict={images: minibatch_X})
        print("这是一个批次!")
        for onerecord in recordprob:
            print("imgname:", NameList[index],
                  '----------possiblity:', onerecord[0])
            saveonerecord = [str(NameList[index]), onerecord[0]]
            index = index + 1
            record.append(saveonerecord)
    print("predicting over!")
    analysis(record, 'predict的验证结果.csv')
    return record
コード例 #10
0
def main():
	args = get_args()
	root_dir = args.root_dir
	imgs = list(os.walk(root_dir))[0][2]

	save_dir = args.save_dir
	num_classes = 100 # CIFAR100
	model = ResNet.resnet(arch='resnet50', pretrained=False, num_classes=num_classes,
		use_att=args.use_att, att_mode=args.att_mode)
	#model = nn.DataParallel(model)
	#print(model)

	if args.resume:
		if os.path.isfile(args.resume):
			print(f'=> loading checkpoint {args.resume}')
			checkpoint = torch.load(args.resume)
			best_acc5 = checkpoint['best_acc5']
			model.load_state_dict(checkpoint['state_dict'], strict=False)
			print(f"=> loaded checkpoint {args.resume} (epoch {checkpoint['epoch']})")
			print(f'=> best accuracy {best_acc5}')
		else:
			print(f'=> no checkpoint found at {args.resume}')

	model_dict = get_model_dict(model, args.type)
	normalizer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

	for img_name in imgs:
		img_path = os.path.join(root_dir, img_name)
		pil_img = PIL.Image.open(img_path)
	
		torch_img = torch.from_numpy(np.asarray(pil_img))
		torch_img = torch_img.permute(2, 0, 1).unsqueeze(0)
		torch_img = torch_img.float().div(255)
		torch_img = F.interpolate(torch_img, size=(224, 224), mode='bilinear', align_corners=False)

		normalized_torch_img = normalizer(torch_img)

		gradcam = GradCAM(model_dict, True)
		gradcam_pp = GradCAMpp(model_dict, True)

		mask, _ = gradcam(normalized_torch_img)
		heatmap, result = visualize_cam(mask, torch_img)

		mask_pp, _ = gradcam_pp(normalized_torch_img)
		heatmap_pp, result_pp = visualize_cam(mask_pp, torch_img)
		
		images = torch.stack([torch_img.squeeze().cpu(), heatmap, heatmap_pp, result, result_pp], 0)

		images = make_grid(images, nrow=1)

		if args.use_att:
			save_dir = os.path.join(args.save_dir, 'att')
		else:
			save_dir = os.path.join(args.save_dir, 'no_att')

		os.makedirs(save_dir, exist_ok=True)
		output_name = img_name
		output_path = os.path.join(save_dir, output_name)

		save_image(images, output_path)
コード例 #11
0
    def __init__(self, pretrained=True, activation='relu', **kwargs):
        """RefineNet decoder on a ResNet-34 encoder.

        Args:
            pretrained: load pretrained weights into the backbone.
            activation: 'relu' or 'elu' — the nonlinearity used throughout.
            **kwargs: forwarded to the parent class.

        Raises:
            ValueError: for any other activation name. (The original left
            self.activation unset in that case, deferring the failure to a
            confusing AttributeError further down.)
        """
        super(RefineNetResNet34, self).__init__(**kwargs)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = ELU_1(inplace=True)
        else:
            raise ValueError(f"unsupported activation: {activation!r}")

        self.resnet = ResNet.resnet34(pretrained=pretrained,
                                      activation=self.activation)

        # Encoder stem (conv + bn + activation); the backbone max-pool is
        # intentionally not included here.
        self.conv1 = nn.Sequential(
            self.resnet.conv1,
            self.resnet.bn1,
            self.resnet.activation,
        )  # 64

        self.encoder1 = self.resnet.layer1  # 64
        self.encoder2 = self.resnet.layer2  # 128
        self.encoder3 = self.resnet.layer3  # 256
        self.encoder4 = self.resnet.layer4  # 512

        # Top-down refinement: each block fuses its encoder features with the
        # coarser refined features from the level above (skip_in channels).
        self.refine4 = RefineNetBlock(512, 512, skip_in=None, pool_mode='avg')
        self.refine3 = RefineNetBlock(256, 256, skip_in=512, pool_mode='avg')
        self.refine2 = RefineNetBlock(128, 128, skip_in=256, pool_mode='avg')
        self.refine1 = RefineNetBlock(64, 64, skip_in=128, pool_mode='avg')
        self.refine0 = RefineNetBlock(64, 32, skip_in=64, pool_mode='avg')

        # Prediction head: two residual conv units then a 1x1 conv to 1 channel.
        self.logit = nn.Sequential(
            ResidualConvUnit(32),
            ResidualConvUnit(32),
            nn.Conv2d(32, 1, kernel_size=1, padding=0),
        )
コード例 #12
0
    def __init__(self, num_classes, loss={'softmax,metric'}):
        """PCB (Part-based Convolutional Baseline) re-ID head on ResNet-50.

        Pool5 is cut into self.part horizontal stripes, each with its own
        classifier, plus a global classifier and two half-feature
        classifiers.

        Args:
            num_classes: number of identity classes.
            loss: loss-selection set (kept for interface compatibility;
                not read in this constructor).
        """
        super(PCB, self).__init__()
        class_num = num_classes
        self.part = 4  # number of horizontal stripes (stale comment said 6)
        model_ft = torchvision.models.resnet50(pretrained=False)
        self.model = model_ft
        # Pool to (part, 1): one feature vector per horizontal stripe.
        self.avgpool = nn.AdaptiveAvgPool2d((self.part, 1))
        self.dropout = nn.Dropout(p=0.5)
        # remove the final downsample so layer4 keeps full spatial resolution
        self.model.layer4[0].downsample[0].stride = (1, 1)
        self.model.layer4[0].conv2.stride = (1, 1)
        # one classifier per stripe (self.part of them, i.e. 4 here)
        for i in range(self.part):
            name = 'classifier' + str(i)
            setattr(
                self, name,
                ClassBlock(2048,
                           class_num,
                           droprate=0.5,
                           relu=False,
                           bnorm=True,
                           num_bottleneck=256))
        # global-feature classifier
        self.globePool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier_globe = ClassBlock(2048, class_num, 0.5)
        # second-level (half-feature) classifiers
        self.classifier_21 = ClassBlock(2048, class_num, 0.5)
        self.classifier_22 = ClassBlock(2048, class_num, 0.5)

        self.SA1 = at.SpatialAttn()
コード例 #13
0
def chooseModel(dataset,
                device,
                N,
                func_f,
                func_c,
                gpu,
                choice,
                last=True,
                conv=False,
                first=True,
                in_chns=1,
                n_filters=6):
    """Build a Verlet ('v') or plain ResNet (anything else) model for a dataset.

    Dataset dimensions (features / classes / channels) are looked up via the
    dataloader; `weights` and `bias` are left as None so the model performs
    its own initialization.
    """
    # NOTE(review): this overrides the `last` parameter unconditionally,
    # making it dead — looks like a debugging leftover. `in_chns` is also
    # unused (in_channels comes from getDims instead). Confirm before
    # removing either, as callers may rely on the current forced behavior.
    last = True

    num_features, num_classes, in_channels = dataloader.getDims(dataset)

    weights, bias = None, None

    if choice == 'v':
        print("v")
        model = ver.Verlet(device, N, num_features, num_classes, func_f,
                           func_c, weights, bias, gpu, last, conv, first,
                           in_channels, n_filters)
    else:
        print("r")
        model = res.ResNet(device, N, num_features, num_classes, func_f,
                           func_c, weights, bias, gpu, last, conv, first,
                           in_channels, n_filters)
    return model
コード例 #14
0
 def __init__(self):
     """YOLO-v0: pretrained ResNet-18 backbone + fully-connected detector.

     Backbone features are average-pooled to a 14x14 grid and the detector
     emits 24 values per grid cell (4096-unit hidden layer in between).
     """
     super(YOLOV0, self).__init__()
     resnet = ResNet.resnet18(pretrained=True)
     self.conv1 = resnet.conv1
     self.bn1 = resnet.bn1
     self.relu = resnet.relu
     self.maxpool = resnet.maxpool
     self.layer1 = resnet.layer1
     self.layer2 = resnet.layer2
     self.layer3 = resnet.layer3
     self.layer4 = resnet.layer4
     self.avgpool = nn.AdaptiveAvgPool2d((14, 14))
     # 决策层:检测层 -> detection head
     self.detector = nn.Sequential(
         nn.Linear(512 * 14 * 14, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         # nn.Linear(4096,1470),
         nn.Linear(4096, 24 * 14 * 14),
     )
     # BUG FIX: the original iterated over self.modules(), which
     # re-initialized the *pretrained* ResNet-18 weights loaded above,
     # defeating pretrained=True. Only the freshly created detector layers
     # should be initialized here.
     for m in self.detector.modules():
         if isinstance(m, nn.Conv2d):
             nn.init.kaiming_normal_(m.weight,
                                     mode='fan_out',
                                     nonlinearity='relu')
             if m.bias is not None:
                 nn.init.constant_(m.bias, 0)
         elif isinstance(m, nn.BatchNorm2d):
             # NOTE(review): BN bias init of 1 (not the conventional 0) kept
             # from the original — confirm it is intentional.
             nn.init.constant_(m.weight, 1)
             nn.init.constant_(m.bias, 1)
         elif isinstance(m, nn.Linear):
             nn.init.normal_(m.weight, 0, 0.01)
             nn.init.constant_(m.bias, 0)
コード例 #15
0
def resnetTrain():
    """Train and evaluate a ResNet classifier on the transformed detection data.

    Defines the model from a fixed architecture spec, trains it, saves it,
    reloads the saved copy, and evaluates both the in-memory model and the
    reloaded one.

    Returns:
        (evaluation, evaluation_s): metrics for the freshly trained model
        and for the model reloaded from disk.
    """
    # Fixed network architecture parameters.
    architecture = {
        'num_classes': 2,
        'num_filters': 128,  #128
        'num_blocks': 3,
        'num_sub_blocks': 2,
        'use_max_pool': True,
        'input_shape': (256, 256, 3),
    }

    data_source_path = "D:/kaggle/detection/train_transform_256_2"

    model = ResNet.ResNetModel(data_source_path, training_params,
                               architecture)

    model.modelDefinition()
    model.modelSaveInfo('resnet_model.h5')
    model.modelTrain()
    model.loadSavedModel()
    fresh_eval = model.modelEvaluate()
    saved_eval = model.modelEvaluate(saved_model=True)
    model.modelReset()
    return fresh_eval, saved_eval
コード例 #16
0
def ValidRed2D(testloader, Bone, Network, path):
    """Evaluate a trained 2-D segmentation network with the Dice score.

    Loads the weights at `path` into a UNet or ResNet, computes the
    per-slice Dice between thresholded prediction and ground truth, saves a
    qualitative overlay for slice 1600, writes all Dice values to
    DICE_test.txt under the bone's results directory, and prints their mean
    and standard deviation.

    Raises:
        ValueError: for an unknown Network name. (The original left `net`
        undefined in that case and crashed with UnboundLocalError.)
    """
    dice_value = []

    if Network == 'UNet':
        net = UNet().to(device)
    elif Network == 'ResNet':
        net = ResNet(BasicBlock, [3, 4, 6]).to(device)
    else:
        raise ValueError('Network must be "UNet" or "ResNet", got %r' %
                         (Network,))
    net.load_state_dict(torch.load(path))
    # NOTE(review): consider net.eval() / torch.no_grad() here — the original
    # evaluated in training mode with gradients enabled.
    for i, data in enumerate(testloader, 0):
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = net(inputs)
        # BUG FIX (naming): the original bound pr=labels and gt=outputs —
        # exactly backwards. Renamed so the variables say what they hold;
        # the dice call still compares the same two arrays.
        truth = labels[0].cpu().detach().numpy()
        pred = outputs[0].cpu().detach().numpy()
        # Binarize the prediction at 0.5.
        pred[0, :, :][pred[0, :, :] <= 0.5] = 0
        pred[0, :, :][pred[0, :, :] > 0.5] = 1
        dice = np.abs(
            computeQualityMeasures(truth[0, :, :].flatten(),
                                   pred[0, :, :].flatten()))

        if (i == 1600):
            # Qualitative check: prediction overlay on the input slice.
            plt.figure()
            plt.imshow(inputs[0, 0, :, :].cpu().detach().numpy(),
                       cmap=plt.cm.gray,
                       interpolation="nearest",
                       vmin=-3,
                       vmax=2)
            plt.imshow(outputs[0, 0, :, :].cpu().detach().numpy(),
                       'OrRd',
                       interpolation='none',
                       alpha=0.4)
            plt.savefig(os.path.join(
                ResultsDirectory, Bone, 'Images',
                'patches_' + Network + '_' + Bone + str(i) + '.png'),
                        dpi=150)

            # Ground-truth overlay on the same slice, for comparison.
            plt.figure()
            plt.imshow(inputs[0, 0, :, :].cpu().detach().numpy(),
                       cmap=plt.cm.gray,
                       interpolation="nearest",
                       vmin=-3,
                       vmax=2)
            plt.imshow(labels[0, 0, :, :].cpu().detach().numpy(),
                       'OrRd',
                       interpolation='none',
                       alpha=0.4)
            plt.savefig(os.path.join(
                ResultsDirectory, Bone, 'Images',
                'patches_Truth_' + Network + '_' + Bone + str(i) + '.png'),
                        dpi=150)
        dice_value.append(dice)

    np.savetxt(os.path.join(ResultsDirectory, Bone, 'DICE_test.txt'),
               dice_value)
    print('Standard Deviation:' + str(statistics.stdev(dice_value)))
    print('Mean:' + str(statistics.mean(dice_value)))
コード例 #17
0
def test_task1(root_path):
    '''
    :param root_path: root path of test data, e.g. ./dataset/task1/test/
    :return results: a dict of classification results
    results = {'audio_0000.pkl': 1, 'audio_0001': 3, ...}
    class number:
        '061_foam_brick': 0
        'green_basketball': 1
        'salt_cylinder': 2
        'shiny_toy_gun': 3
        'stanley_screwdriver': 4
        'strawberry': 5
        'toothpaste_box': 6
        'toy_elephant': 7
        'whiteboard_spray': 8
        'yellow_block': 9
    '''
    results = dict()

    # Run relative to this file so the relative model path below resolves.
    os.chdir(os.path.split(os.path.realpath(__file__))[0])
    audio_transforms = transforms.Compose([transforms.ToTensor()])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Maps the model's output index to the task's class number above.
    label_list = [9, 2, 3, 8, 7, 6, 5, 0, 4, 1]

    model = ResNet.resnet18(num_classes=10)
    model = model.to(device)
    model = torch.nn.DataParallel(model)
    model.load_state_dict(torch.load('./model/resnet18.pth'))
    model.eval()

    for sample in os.listdir(root_path):
        # Each sample is an .npz/pickle archive holding a 4-channel recording
        # (presumably one column per microphone — confirm with the dataset).
        data = np.load(os.path.join(root_path, sample),
                       allow_pickle=True)['audio']
        for i in range(4):
            # Per channel: resample 44.1 kHz -> 11 kHz, trim the edges, then
            # STFT -> log magnitude, clipped to [-5, 5] and rescaled to 0..255.
            S = librosa.resample(data[:, i], orig_sr=44100, target_sr=11000)
            S = np.abs(librosa.stft(S[5650:-5650], n_fft=510, hop_length=128))
            S = np.log10(S + 0.0000001)
            S = np.clip(S, -5, 5)
            S -= np.min(S)
            S = 255 * (S / np.max(S))
            # Center-pad the spectrogram to exactly 256 frames wide.
            if S.shape[-1] != 256:
                S = np.pad(S, ((0, 0), (int(np.ceil((256 - S.shape[-1]) / 2)),
                                        int(np.floor(
                                            (256 - S.shape[-1]) / 2)))))
            # Stack the 4 channels into an HxWx4 uint8 "image".
            if i == 0:
                feature = np.uint8(S)[:, :, np.newaxis]
            else:
                feature = np.concatenate(
                    (np.uint8(S)[:, :, np.newaxis], feature), axis=-1)

        X = audio_transforms(feature)
        X = X.to(device)
        # Single-sample batch; argmax over class probabilities.
        y_hat = torch.softmax(model(X.unsqueeze(0)),
                              dim=-1).argmax(dim=1).cpu().item()

        results[sample] = label_list[y_hat]
    return results
コード例 #18
0
ファイル: Test.py プロジェクト: Suqi2017/-
def pre(vir_path):
    """Classify a handwritten-character image and print an association hint.

    Loads the checkpoint './checkpoint3' into a ResNet, preprocesses the
    image at `vir_path` (resize to 100x100, grayscale, tensor), predicts a
    class index, maps it back to a character through char_dict, and prints
    the character plus any associated phrase from ass_dict.
    """
    # Checkpoint is loaded onto CPU first; moved to GPU below if available.
    checkpoint = t.load('./checkpoint3', map_location=t.device('cpu'))
    transforms = T.Compose([T.Resize((100, 100)), T.Grayscale(), T.ToTensor()])
    model = ResNet(ResidualBlock)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    # Single-image batch: add the leading batch dimension.
    img = transforms(Image.open(vir_path))
    img = t.unsqueeze(img, dim=0)

    if t.cuda.is_available():
        model = model.cuda()
        img = img.cuda()

    output = model(img)
    _, pred = t.max(output.data, 1)

    # Reverse lookup: predicted class index -> character.
    num = pred.item()
    char = list(char_dict.keys())[list(char_dict.values()).index(num)]

    # The long run of newlines "clears" the console before printing the result.
    print(
        '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n您输入的汉字是:{}'
        .format(char))

    if char not in ass_dict.keys():
        print('我脑袋小,这个字还暂时没有联想到后面是什么……嘻嘻')
    else:
        print('联想:{}'.format(ass_dict[char]))
コード例 #19
0
def net_from_type_string(net_type, num_classes):
    """Instantiate a network by its type string.

    Supported types: 'alexnet', 'vgg16', 'resnet'. Any other value prints an
    error and returns None (kept for backward compatibility with callers
    that check for None).
    """
    if net_type == 'alexnet':
        return AlexNet(num_classes)
    elif net_type == 'vgg16':
        # NOTE(review): this branch also returns AlexNet — almost certainly a
        # copy-paste bug; confirm whether a VGG16 class was intended before
        # changing the behavior.
        return AlexNet(num_classes)
    elif net_type == 'resnet':
        return ResNet.make_resnet(50, num_classes)
    print('error: unknown net type')  # BUG FIX: message typo ("unkown")
    return None
コード例 #20
0
 def __init__(self, num_classes=31):
     """DeepMEDA: pretrained ResNet-50 features + marginal MMD + classifier.

     NOTE(review): `bottle_neck` below is read from module scope, not taken
     as a parameter (the otherwise-similar DSAN class takes it as a
     constructor argument and stores it) — consider promoting it to a
     parameter for consistency; confirm how forward() resolves it first.
     """
     super(DeepMEDA, self).__init__()
     self.feature_layers = ResNet.resnet50(True)  # pretrained backbone
     self.mmd_marginal = mmd.MMD_loss()  # marginal-distribution MMD loss
     if bottle_neck:
         # 2048 -> 256 bottleneck before the classifier head.
         self.bottle = nn.Linear(2048, 256)
         self.cls_fc = nn.Linear(256, num_classes)
     else:
         self.cls_fc = nn.Linear(2048, num_classes)
コード例 #21
0
def ValidRed3D(testloader, Bone, Network, path, sujet):
    """Evaluate a trained net slice-by-slice and report a single 3-D Dice.

    Loads the weights at `path` into a UNet or ResNet, binarizes each
    predicted slice at 0.5, stacks ground truth and predictions into 3-D
    volumes, saves a qualitative overlay for slice 30, and prints the Dice
    computed over the whole volume.

    NOTE(review): if `Network` is neither 'UNet' nor 'ResNet', `net` is
    never assigned and the load_state_dict line crashes with
    UnboundLocalError — consider raising ValueError explicitly.
    """
    volume3D_terrain = []
    volume3D_exp = []

    if Network == 'UNet':
        net = UNet().to(device)
    elif Network == 'ResNet':
        net = ResNet(BasicBlock, [3, 4, 6]).to(device)
    net.load_state_dict(torch.load(path))

    for i, data in enumerate(testloader, 0):
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = net(inputs)
        # Binarize the predicted slice at 0.5.
        # (Variable is named gt but holds the network OUTPUT — kept as-is in
        # this documentation-only pass.)
        gt = outputs[0].cpu().detach().numpy()
        gt[0, :, :][gt[0, :, :] <= 0.5] = 0
        gt[0, :, :][gt[0, :, :] > 0.5] = 1

        # Accumulate ground-truth and predicted slices for the 3-D Dice.
        volume3D_terrain.append(labels[0, 0].cpu().detach().numpy())
        volume3D_exp.append(gt[0])

        if i == 30:
            # Qualitative check: prediction overlay on the input slice.
            plt.figure()
            plt.imshow(inputs[0, 0, :, :].cpu().detach().numpy(),
                       'gray',
                       interpolation='none')
            plt.imshow(outputs[0, 0, :, :].cpu().detach().numpy(),
                       'OrRd',
                       interpolation='none',
                       alpha=0.6)
            plt.savefig(os.path.join(
                ResultsDirectory, Bone, 'Images',
                '2DSlices_' + Network + '_' + Bone + sujet + str(i) + '.png'),
                        dpi=150)

            # Ground-truth overlay on the same slice, for comparison.
            plt.figure()
            plt.imshow(inputs[0, 0, :, :].cpu().detach().numpy(),
                       'gray',
                       interpolation='none')
            plt.imshow(labels[0, 0, :, :].cpu().detach().numpy(),
                       'OrRd',
                       interpolation='none',
                       alpha=0.6)
            plt.savefig(os.path.join(
                ResultsDirectory, Bone, 'Images', '2DSlices_Truth_' + Network +
                '_' + Bone + sujet + str(i) + '.png'),
                        dpi=150)

    # Single Dice over the full stacked volumes (not a per-slice average).
    volume3D_terrain = np.array(volume3D_terrain)
    volume3D_exp = np.array(volume3D_exp)
    dice = np.abs(
        computeQualityMeasures(volume3D_terrain.flatten(),
                               volume3D_exp.flatten()))
    print('dice:' + str(dice))
コード例 #22
0
def test_resnet(inputs: tp.Numpy.Placeholder(
    (64, 3, 26, 110, 110))) -> Tuple[tp.Numpy, tp.Numpy]:
    """OneFlow job: run the AP3D ResNet-50-3D over a clip batch.

    Input placeholder is (64, 3, 26, 110, 110) — presumably
    (batch, channels, frames, H, W); confirm with the data pipeline.
    Returns the (y, f) pair from build_network.
    """
    resnet2d = getresnet.getResnet()
    # NOTE(review): `layer` is never used afterwards — dead local? confirm
    # getresnet.getResnet() has no required side effect before removing.
    layer = resnet2d[0]
    # Per-stage indices of blocks that get C3D conv / non-local insertions.
    c3d_idx = [[], [0, 2], [0, 2, 4], []]
    nl_idx = [[], [1, 3], [1, 3, 5], []]
    y, f = ResNet.ResNet503D(10, AP3D.APP3DC, c3d_idx,
                             nl_idx).build_network(inputs)

    return (y, f)
コード例 #23
0
 def __init__(self, num_classes=31, bottle_neck=True):
     """DSAN: pretrained ResNet-50 features + LMMD alignment + classifier.

     When `bottle_neck` is True a 2048 -> 256 linear bottleneck precedes the
     classifier; otherwise the classifier consumes the 2048-dim backbone
     features directly.
     """
     super(DSAN, self).__init__()
     self.feature_layers = ResNet.resnet50(True)
     self.lmmd_loss = lmmd.LMMD_loss(class_num=num_classes)
     self.bottle_neck = bottle_neck
     feat_dim = 2048  # backbone output width
     if bottle_neck:
         # Project the backbone features down before classification.
         self.bottle = nn.Linear(feat_dim, 256)
         feat_dim = 256
     self.cls_fc = nn.Linear(feat_dim, num_classes)
コード例 #24
0
ファイル: test.py プロジェクト: lonelybeansprouts/transfer
def load_model(name='alexnet'):
    """Build an untrained (pretrained=False) 31-class model by name.

    Args:
        name: 'alexnet' or 'resnet'.

    Returns:
        The instantiated model.

    Raises:
        ValueError: for an unrecognized name. (The original fell through and
        crashed with UnboundLocalError on the return statement.)
    """
    if name == 'alexnet':
        model = AlexNet.AlexNetFc(pretrained=False, num_classes=31)
        # torch.nn.init.xavier_uniform_(model.nfc.weight)
        # torch.nn.init.constant_(model.nfc.bias, 0.1)
    elif name == 'resnet':
        model = resnet.myresnet(pretrained=False, num_classes=31)
        # torch.nn.init.xavier_uniform_(model.nfc.weight.data)
        # torch.nn.init.constant_(model.nfc.bias.data, 0.01)
    else:
        raise ValueError(f"unknown model name: {name!r}")
    return model
コード例 #25
0
ファイル: Main.py プロジェクト: grayinfinity/Connect4Zero
def load_or_create_neural_net():
    """Return the current best ResNet-18, loading saved weights when available.

    Always constructs a fresh ResNet-18; if the checkpoint file exists, its
    state dict is loaded on top. The model is returned in eval mode.
    """
    checkpoint_path = './best_model_resnet.pth'
    player = ResNet.resnet18()
    if os.path.exists(checkpoint_path):
        print('loading already trained model')
        player.load_state_dict(torch.load(checkpoint_path))
    player.eval()
    return player
コード例 #26
0
def load_or_create_neural_net():
    """Return the best player network for the type selected in config.net.

    Supports 'resnet' and 'densenet'. If a saved checkpoint for that type
    exists, its weights are loaded; otherwise a fresh network is created.
    The model is always returned in eval mode.

    Raises:
        ValueError: if config.net is neither 'resnet' nor 'densenet'.
    """
    # Map the configured net type to its checkpoint path and constructor.
    # The original duplicated the load/create logic four times; this keeps
    # one copy of it.
    if config.net == 'resnet':
        file_path = './best_model_resnet.pth'
        build_net = ResNet.resnet18
    elif config.net == 'densenet':
        file_path = './best_model_densenet.pth'
        build_net = ResNet.densenet
    else:
        print(
            'Neural Net type not understood. Chose between resnet or densenet in config.py'
        )
        raise ValueError

    best_player_so_far = build_net()
    if os.path.exists(file_path):
        print('loading already trained model')
        time.sleep(0.3)
        best_player_so_far.load_state_dict(torch.load(file_path))
    else:
        print('Trained model doesnt exist. Starting from scratch.')
        time.sleep(0.3)
    best_player_so_far.eval()

    return best_player_so_far
コード例 #27
0
ファイル: office31.py プロジェクト: Silflame/transferlearning
def load_model(name='alexnet'):
    """Build a pretrained 31-class model by name.

    Args:
        name: 'alexnet' or 'resnet'.

    Returns:
        The instantiated model with pretrained weights.

    Raises:
        ValueError: for an unrecognized name. (The original fell through and
        crashed with UnboundLocalError on the return statement.)
    """
    if name == 'alexnet':
        model = AlexNet.AlexNetFc(pretrained=True, num_classes=31)
        # torch.nn.init.xavier_uniform_(model.nfc.weight)
        # torch.nn.init.constant_(model.nfc.bias, 0.1)
    elif name == 'resnet':
        model = resnet.myresnet(pretrained=True, num_classes=31)
        # torch.nn.init.xavier_uniform_(model.nfc.weight.data)
        # torch.nn.init.constant_(model.nfc.bias.data, 0.01)
    else:
        raise ValueError(f"unknown model name: {name!r}")
    return model
コード例 #28
0
def Objectinfer(inferprogram, exe, ActionGather, boundGather, colorbackground,
                colorbackgroundnow):
    """Crop each tracked region to an image file, then run inference on each.

    Regions with action +1 are cropped from the current frame
    (colorbackgroundnow), regions with action -1 from the reference frame
    (colorbackground); region i is saved as "<i>.jpg". Returns the per-region
    inference results in index order.
    """
    for i, action in enumerate(ActionGather):
        if action == 1:
            processer.Objectsave(colorbackgroundnow, boundGather[i], i)
        elif action == -1:
            processer.Objectsave(colorbackground, boundGather[i], i)
    return [
        model.infer(inferprogram, exe, str(i) + ".jpg")
        for i in range(len(ActionGather))
    ]
コード例 #29
0
    def __init__(self, pretrained=True, **kwargs):
        """U-Net with a ResNet-50 encoder and squeeze-excitation gating.

        Each encoder stage is a resnet50 layer followed by an scSE gate
        (channel widths in the trailing comments); `center` downsamples once
        more, five SE-enabled Decoder blocks upsample back with skip
        connections, and `logit` maps the final 32 channels to a
        single-channel logit map.

        Args:
            pretrained: load pretrained weights into the backbone.
            **kwargs: forwarded to the parent class.
        """
        super(UNetResNet50_SE, self).__init__(**kwargs)
        self.resnet = ResNet.resnet50(pretrained=pretrained)

        # Encoder stem (conv + bn + relu); the backbone max-pool is not
        # included here.
        self.conv1 = nn.Sequential(
            self.resnet.conv1,
            self.resnet.bn1,
            self.resnet.relu,
        )  # 64

        self.encoder1 = nn.Sequential(self.resnet.layer1,
                                      scSqueezeExcitationGate(256))  # 256
        self.encoder2 = nn.Sequential(self.resnet.layer2,
                                      scSqueezeExcitationGate(512))  # 512
        self.encoder3 = nn.Sequential(self.resnet.layer3,
                                      scSqueezeExcitationGate(1024))  # 1024
        self.encoder4 = nn.Sequential(self.resnet.layer4,
                                      scSqueezeExcitationGate(2048))  # 2048

        # Bottleneck: one extra downsample then two conv-bn-relu blocks.
        self.center = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBn2d(2048, 1024, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            ConvBn2d(1024, 1024, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

        # Decoder in_channels = skip channels + upsampled channels
        # (the *2 reflects the wider bottleneck-resnet feature widths).
        self.decoder5 = Decoder(512 + 1024 * 2,
                                512,
                                512,
                                convT_channels=1024,
                                SE=True)
        self.decoder4 = Decoder(256 + 512 * 2,
                                256,
                                256,
                                convT_channels=512,
                                SE=True)
        self.decoder3 = Decoder(128 + 256 * 2,
                                128,
                                128,
                                convT_channels=256,
                                SE=True)
        self.decoder2 = Decoder(64 + 128 * 2,
                                64,
                                64,
                                convT_channels=128,
                                SE=True)
        self.decoder1 = Decoder(32 + 64, 64, 32, convT_channels=64, SE=True)

        # Final prediction head: 32 -> 32 -> 1 channel.
        self.logit = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 1, kernel_size=1, padding=0),
        )
コード例 #30
0
 def run(self):
     """Worker-thread entry point: run ResNet prediction and report to the UI.

     Posts a "started" message to the GUI text box via the pub/sub channel
     "report101", runs the prediction, then posts the result the same way.
     wx.CallAfter marshals both calls onto the GUI thread.
     """
     wx.CallAfter(pub.sendMessage,
                  "report101",
                  message="Prediction process started. \nLoading........"
                  )  # Sending report to the Text Box
     # Imported lazily so the (heavy) model module loads on the worker
     # thread, not at application startup.
     import ResNet
     result = ResNet.predict(
         self.path, self.model_path, self.index_file_path,
         self.main_ui)  # This calls the ResNet Prediction Class
     wx.CallAfter(pub.sendMessage, "report101",
                  message=result)  # # Sending report to the Text Box
コード例 #31
0
def load_student_rnn(num_classes):
    """Construct the student WideResNet from the config.student_rnn settings.

    All architecture hyperparameters come from the config dict; only the
    number of output classes is supplied by the caller.
    """
    cfg = config.student_rnn
    return ResNet.WideResNet(depth=cfg['depth'],
                             num_classes=num_classes,
                             widen_factor=cfg['widen_factor'],
                             input_features=cfg['input_features'],
                             output_features=cfg['output_features'],
                             dropRate=cfg['dropRate'],
                             strides=cfg['strides'])