Code example #1
from efficientnet_pytorch import EfficientNet
import torch.nn as nn


def make_model(arch, nclass):
    # Map arch names 'efficientNetb0'..'efficientNetb7' to their pretrained
    # variants; unknown names fall back to efficientnet-b3.
    variants = {f'efficientNetb{i}': f'efficientnet-b{i}' for i in range(8)}
    model_ft = EfficientNet.from_pretrained(variants.get(arch, 'efficientnet-b3'))
    # Replace the classifier head to match the target class count.
    num_ftrs = model_ft._fc.in_features
    model_ft._fc = nn.Linear(num_ftrs, nclass)
    return model_ft
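
A quick usage sketch (the 10-class task and the batch are illustrative assumptions; 380x380 is EfficientNet-B4's native input resolution):

import torch

model = make_model('efficientNetb4', 10)   # hypothetical 10-class task
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 380, 380))
print(logits.shape)  # torch.Size([2, 10])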

    
Code example #2
    def __init__(self, phi: Phi, out_channels: int, pretrained: bool = False):
        super().__init__()
        # Phi and SIDEOUT come from the surrounding module: SIDEOUT maps the
        # compound coefficient phi to (side-output stage indices, channel widths).
        model_name = f"efficientnet-b{phi}"
        if pretrained:
            self.module = EfficientNet.from_pretrained(model_name)
        else:
            self.module = EfficientNet.from_name(model_name)
        self._sideout_stages, self.sideout_channels = SIDEOUT[phi]
        # 1x1 convolutions projecting each side output to a common width.
        self.projects = nn.ModuleList(
            [
                nn.Conv2d(in_channels=i, out_channels=out_channels, kernel_size=1)
                for i in self.sideout_channels
            ]
        )
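
A hedged sketch of how the projections might be applied in a forward pass; the `_sideout_stages` indices and the backbone internals (`_conv_stem`, `_bn0`, `_swish`, `_blocks`) follow efficientnet_pytorch's layout, but the project's real forward may differ:

    def forward(self, x):
        backbone = self.module
        x = backbone._swish(backbone._bn0(backbone._conv_stem(x)))
        features = []
        for idx, block in enumerate(backbone._blocks):
            x = block(x)
            if idx in self._sideout_stages:
                features.append(x)
        # Project each collected side output to the common channel width.
        return [proj(f) for proj, f in zip(self.projects, features)]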
Code example #3
    def __init__(self):
        super().__init__()
        # Pretrained EfficientNet-B7 ends in a 1000-way ImageNet head; a
        # second linear layer maps those logits to the 4 target classes.
        self.layer1 = EfficientNet.from_pretrained('efficientnet-b7')
        self.linear = nn.Linear(1000, 4)
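
The forward pass these two layers imply would be, as a sketch:

    def forward(self, x):
        x = self.layer1(x)     # (N, 1000) ImageNet logits from EfficientNet-B7
        return self.linear(x)  # (N, 4) task logits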
Code example #4
    def __init__(self, weights_path):
        model = EfficientNet.from_name('efficientnet-b7',
                                       override_params={'num_classes': 1})

        # Swap each MBConv block's 1x1 expansion conv for SeqExpandConv so
        # information can be shared across the fixed-length frame sequence.
        for module in model.modules():
            if isinstance(module, MBConvBlock):
                if module._block_args.expand_ratio != 1:
                    expand_conv = module._expand_conv
                    seq_expand_conv = SeqExpandConv(
                        expand_conv.in_channels, expand_conv.out_channels,
                        VIDEO_SEQUENCE_MODEL_SEQUENCE_LENGTH)
                    module._expand_conv = seq_expand_conv
        self.model = model.cuda().eval()

        normalize = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
        self.transform = Compose([
            SmallestMaxSize(VIDEO_MODEL_MIN_SIZE),
            CenterCrop(VIDEO_MODEL_CROP_HEIGHT, VIDEO_MODEL_CROP_WIDTH),
            normalize,
            ToTensor()
        ])

        # Load the checkpoint on CPU and cast any half-precision tensors back
        # to float32 before loading into the model.
        state = torch.load(weights_path,
                           map_location=lambda storage, loc: storage)
        state = {key: value.float() for key, value in state.items()}
        self.model.load_state_dict(state)
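
A hedged inference sketch for this predictor; the `predictor` handle, the frame shape, and the sigmoid readout are assumptions, not part of the original:

import numpy as np
import torch

# frames: consecutive RGB video frames, shape (T, H, W, 3), dtype uint8.
frames = np.zeros((VIDEO_SEQUENCE_MODEL_SEQUENCE_LENGTH, 720, 1280, 3), dtype=np.uint8)
batch = torch.stack([predictor.transform(image=f)['image'] for f in frames]).cuda()
with torch.no_grad():
    fake_probs = torch.sigmoid(predictor.model(batch)).flatten().cpu()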
Code example #5
    def create_model(self):
        kwargs = {
            'model_name': self.encoder_name,
            'num_classes': self.num_classes,
        }
        model = EfficientNet.from_pretrained(**kwargs)

        return model

    def __init__(
        self,
        hparams,
        model_name: Optional[str] = None,
        fold: Optional[int] = None,
        train_df: Optional[pd.DataFrame] = None,
        valid_df: Optional[pd.DataFrame] = None,
        test_df: Optional[pd.DataFrame] = None,
        train_images_path: Optional[Path] = None,
        valid_images_path: Optional[Path] = None,
        test_images_path: Optional[Path] = None,
        path: Optional[Path] = None,
    ):
        super().__init__()
        self.path = path
        self.model_name = model_name
        self.sz = 384
        self.hparams = hparams
        self.lr = self.hparams.lr
        self.fold = fold

        self.train_df = train_df
        self.valid_df = valid_df
        self.test_df = test_df

        self.train_images_path = train_images_path
        self.valid_images_path = valid_images_path
        self.test_images_path = test_images_path

        if "resnet" in self.hparams.arch:
            self.model = models.__dict__[self.hparams.arch](pretrained=True)
            n_in = self.model.fc.in_features
            remove_range = 2
            self.model = nn.Sequential(
                *list(self.model.children())[:-remove_range])
            n_out = 1
            self.head = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                Flatten(),
                nn.Dropout(),
                nn.Linear(n_in, n_out),
            )

        # if "resnext" in self.hparams.arch:
        #     self.model = torch.hub.load(
        #         "facebookresearch/semi-supervised-ImageNet1K-models",
        #         self.hparams.arch,
        #     )
        #     c_feature = self.model.fc.in_features
        #     remove_range = 2  # TODO: ditto
        if "efficient" in self.hparams.arch:
            self.model = EfficientNet.from_pretrained(
                self.hparams.arch,
                advprop=True,
            )
            self.head = nn.Sequential(nn.ReLU(), nn.Dropout(),
                                      nn.Linear(1000, 1))
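
Neither branch defines a `forward`; a plausible sketch (an assumption, not shown in the snippet):

    def forward(self, x):
        # resnet: the truncated trunk emits a feature map the head pools;
        # efficientnet: the backbone emits 1000-d logits the head maps to 1.
        return self.head(self.model(x))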
Code example #7
    def __init__(self, first_weights_path, second_weights_path):
        first_model = EfficientNet.from_name(
            'efficientnet-b7', override_params={'num_classes': 1})
        self.first_model = first_model.cuda().eval()
        second_model = EfficientNet.from_name(
            'efficientnet-b7', override_params={'num_classes': 1})
        self.second_model = second_model.cuda().eval()

        first_normalize = Normalize(mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225])
        self.first_transform = Compose([
            SmallestMaxSize(VIDEO_MODEL_CROP_WIDTH),
            PadIfNeeded(VIDEO_MODEL_CROP_HEIGHT, VIDEO_MODEL_CROP_WIDTH),
            CenterCrop(VIDEO_MODEL_CROP_HEIGHT, VIDEO_MODEL_CROP_WIDTH),
            first_normalize,
            ToTensor()
        ])

        second_normalize = Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
        self.second_transform = Compose([
            SmallestMaxSize(VIDEO_MODEL_MIN_SIZE),
            CenterCrop(VIDEO_MODEL_CROP_HEIGHT, VIDEO_MODEL_CROP_WIDTH),
            second_normalize,
            ToTensor()
        ])

        first_state = torch.load(first_weights_path,
                                 map_location=lambda storage, loc: storage)
        first_state = {
            key: value.float()
            for key, value in first_state.items()
        }
        self.first_model.load_state_dict(first_state)

        second_state = torch.load(second_weights_path,
                                  map_location=lambda storage, loc: storage)
        second_state = {
            key: value.float()
            for key, value in second_state.items()
        }
        self.second_model.load_state_dict(second_state)
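
A hypothetical `predict` showing how the pair might be ensembled (the method name and the averaging are assumptions):

    def predict(self, first_batch, second_batch):
        # Average the two heads' sigmoid outputs.
        with torch.no_grad():
            p1 = torch.sigmoid(self.first_model(first_batch.cuda()))
            p2 = torch.sigmoid(self.second_model(second_batch.cuda()))
        return ((p1 + p2) / 2).mean().item()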
Code example #8
def model_fn(model_dir, model_name, num_classes):
    logger.info('model_fn')
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # conv_type is an extension in this project's EfficientNet fork; the
    # stock efficientnet_pytorch from_pretrained does not accept it.
    model = EfficientNet.from_pretrained(model_name, conv_type='Std')
    for param in model.parameters():
        param.requires_grad = False
    num_features = model._fc.in_features
    model._fc = nn.Linear(num_features, num_classes)
    if torch.cuda.device_count() > 1:
        logger.info("Gpu count: {}".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))
    return model.to(device)
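
A hypothetical call, e.g. from a SageMaker-style entry point (the path and class count are illustrative):

model = model_fn('/opt/ml/model', 'efficientnet-b0', num_classes=5)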
Code example #9
def model_fn(model_dir, model_name, num_classes, conv_type):
    logger.info('model_fn')
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # 'Equi' loads precomputed per-layer offset tables for this project's
    # modified convolutions; 'Std' keeps ordinary convolutions.
    if conv_type == 'Std':
        layerdict, offsetdict = None, None
    elif conv_type == 'Equi':
        layerdict, offsetdict = torch.load('layertest.pt'), torch.load(
            'offsettest.pt')
    model = EfficientNet.from_pretrained(model_name,
                                         conv_type=conv_type,
                                         layerdict=layerdict,
                                         offsetdict=offsetdict)
    #for param in model.parameters():
    #    param.requires_grad = False
    num_features = model._fc.in_features
    model._fc = nn.Linear(num_features, num_classes)
    if torch.cuda.device_count() > 1:
        logger.info("Gpu count: {}".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))
    return model.to(device)
Code example #10
def _test(args):
    """
    is_distributed = len(args.hosts) > 1 and args.dist_backend is not None
    logger.debug("Distributed training - {}".format(is_distributed))

    if is_distributed:
        # Initialize the distributed environment.
        world_size = len(args.hosts)
        os.environ['WORLD_SIZE'] = str(world_size)
        host_rank = args.hosts.index(args.current_host)
        os.environ['RANK'] = str(host_rank)
        dist.init_process_group(backend=args.dist_backend, rank=host_rank, world_size=world_size)
        logger.info(
            'Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
                args.dist_backend,
                dist.get_world_size()) + 'Current host rank is {}. Using cuda: {}. Number of gpus: {}'.format(
                dist.get_rank(), torch.cuda.is_available(), args.num_gpus))
                
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    logger.info("Device Type: {}".format(device))

    logger.info("Loading dataset from ImageFolder")
    img_size = EfficientNet.get_image_size(args.model_name)
    transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #target_transform = transforms.Compose([transforms.Resize((224,224)),
    #                                       transforms.ToTensor()])

    root = os.path.join(args.data_dir, "val")
    testset = torchvision.datasets.ImageFolder(root,
                                               transform=transform,
                                               target_transform=None)
    test_loader = DataLoader(testset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers)
    class_map = testset.classes
    num_classes = len(testset.classes)

    logger.info("Model loaded")
    model = model_fn(args.model_dir, args.model_name, num_classes,
                     args.conv_type)

    with torch.no_grad():
        running_acc = 0.0
        for i, data in enumerate(test_loader):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            model.eval()
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            # print statistics
            running_acc += torch.sum(preds == labels.data)

        epoch_acc = running_acc.double() / len(testset)
        print("test Acc: %.3f" % (epoch_acc))
        """
        preds = torch.topk(outputs, k=7).indices.squeeze(0).tolist()  
        print(preds)      
        print('-----')
        if args.batch_size > 1:
            for batch in preds:
                for idx in batch:
                    category = class_map[idx]
                    prob = torch.softmax(outputs, dim=1)[0, idx].item()
                    print('{:<75} ({:.2f}%)'.format(category, prob*100))
        else:            
            for idx in preds:
                category = class_map[idx]
                prob = torch.softmax(outputs, dim=1)[0, idx].item()
                print('{:<75} ({:.2f}%)'.format(category, prob*100))
        """

    print('Finished Testing')
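
A hypothetical invocation with an argparse-like namespace (field values are illustrative only):

from types import SimpleNamespace

args = SimpleNamespace(data_dir='data', model_dir='model',
                       model_name='efficientnet-b0', conv_type='Std',
                       batch_size=32, workers=4)
_test(args)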
Code example #11
    with torch.no_grad():
        outputs = model(img)
    # Print predictions
    print('-----')
    for idx in torch.topk(outputs, k=5).indices.squeeze(0).tolist():
        prob = torch.softmax(outputs, dim=1)[0, idx].item()
        print('{label:<75} ({p:.2f}%)'.format(label=labels_map[idx], p=prob*100))



if __name__ == '__main__':
    # Auto-download pretrained weights
    # model_ft = EfficientNet.from_pretrained('efficientnet-b0')
    model_ft = EfficientNet.from_name('efficientnet-b5')
    # Load pretrained weights offline (download them in advance)
    # model_ft = EfficientNet.from_name(net_name)
    # net_weight = 'eff_weights/' + pth_map[net_name]
    # state_dict = torch.load(net_weight)
    # model_ft.load_state_dict(state_dict)

    # Replace the fully connected layer
    num_ftrs = model_ft._fc.in_features
    model_ft._fc = nn.Linear(num_ftrs, class_num)
    if use_gpu:
        model_ft = model_ft.cuda()
    print('-' * 10)
    print('Test Accuracy:')
    model_ft.load_state_dict(torch.load("./images/model/efficientnet-b5.pth"))
    # criterion = nn.CrossEntropyLoss().cuda()
Code example #12

# train
pth_map = {
    'efficientnet-b0': 'efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'efficientnet-b7-dcc49843.pth',
    'efficientnet-b8': 'adv-efficientnet-b8-22a8fe65.pth',
}
# Load pretrained weights offline
model_ft = EfficientNet.from_name(net_name)
net_weight = './EfficientNet_model/' + pth_map[net_name]
state_dict = torch.load(net_weight)
model_ft.load_state_dict(state_dict)

# Replace the fully connected layer
num_ftrs = model_ft._fc.in_features
model_ft._fc = nn.Linear(num_ftrs, class_num)

criterion = nn.CrossEntropyLoss()  # cross-entropy loss
if use_gpu:
    model_ft = model_ft.cuda()
    criterion = criterion.cuda()

optimizer = optim.SGD((model_ft.parameters()),
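
The snippet breaks off mid-call; a typical completion would look like the line below (learning rate and momentum are illustrative, not the original's):

optimizer = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)  # hypothetical hyperparameters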
Code example #13
File: main.py  Project: zjw97/EfficientNet
    arg.save_dir = "%s/%s/%d/" % (os.getcwd(), arg.save_dir, time)
    if os.path.exists(arg.save_dir) is False:
        os.mkdir(arg.save_dir)

    Visualizer = SummaryWriter(arg.save_dir)

    arg.save_dir = os.path.join(arg.save_dir, "%d.log"%(int(time)))
    logger = logger(arg.save_dir)
    logger.write(str(arg) + "\n")

    os.environ["CUDA_VISIBLE_DEVICES"] = arg.gpus
    torch_device = torch.device("cuda")

    train_loader, val_loader = get_loaders(arg.root, arg.batch_size, 528, arg.num_workers)

    net = EfficientNet.from_pretrained(arg.model, num_classes=2)
    net = nn.DataParallel(net).to(torch_device)
    loss = nn.CrossEntropyLoss()

    # Linear scaling rule: scale the base learning rate by batch_size / 256.
    scaled_lr = arg.lr * arg.batch_size / 256
    optim = {
        "adam" : lambda : torch.optim.Adam(net.parameters(), lr=scaled_lr, betas=arg.beta, weight_decay=arg.decay),
        "rmsprop" : lambda : torch.optim.RMSprop(net.parameters(), lr=scaled_lr, momentum=arg.momentum, alpha=arg.alpha, weight_decay=arg.decay),
        "SGD": lambda :torch.optim.SGD(net.parameters(), lr=arg.lr, momentum=arg.momentum, weight_decay=arg.decay)
    }[arg.optim]()

    # Decay step every 2.4 epochs (expressed in iterations), matching the
    # EfficientNet training recipe.
    scheduler = get_scheduler(optim, arg.scheduler, int(2.4*len(train_loader)), arg.epoch * len(train_loader))

    model = Runner(arg, net, optim, torch_device, loss, logger, Visualizer, scheduler)
    if arg.test is False:
        model.train(train_loader, val_loader)
Code example #14
def main():
    criterion = nn.CrossEntropyLoss().cuda()

    if args.model == 'effnet':
        blocks_args, global_params = get_model_params('efficientnet-b5', None)
        blocks_args.append(
            BlockArgs(kernel_size=3,
                      num_repeat=3,
                      input_filters=320,
                      output_filters=480,
                      expand_ratio=6,
                      id_skip=True,
                      stride=[2],
                      se_ratio=0.25))
        model = EfficientNet(blocks_args, global_params)
        pretrained_dict = model_zoo.load_url(url_map['efficientnet-b5'])
        model_dict = model.state_dict()
        # The appended block changes the conv head's input width, so the
        # pretrained head weight no longer fits and is dropped.
        del pretrained_dict['_conv_head.weight']
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        model._fc = nn.Sequential(
            nn.BatchNorm1d(2048), nn.Dropout(p=0.5),
            nn.Linear(in_features=2048, out_features=500, bias=True),
            nn.ReLU(), nn.BatchNorm1d(500), nn.Dropout(p=0.5),
            nn.Linear(in_features=500, out_features=60, bias=True), nn.ReLU(),
            nn.BatchNorm1d(60), nn.Dropout(p=0.5),
            nn.Linear(in_features=60, out_features=1, bias=True))

    if args.model.startswith('efficientnet'):
        msize = int(args.model[-1])
        if msize < 6:
            model = EfficientNet.from_pretrained(args.model)
        else:
            model = EfficientNet.from_name(args.model)
        model._fc = nn.Sequential(
            nn.BatchNorm1d(2048), nn.Dropout(p=0.4),
            nn.Linear(in_features=2048, out_features=500, bias=True),
            nn.ReLU(), nn.BatchNorm1d(500), nn.Dropout(p=0.4),
            nn.Linear(in_features=500, out_features=60, bias=True), nn.ReLU(),
            nn.BatchNorm1d(60), nn.Dropout(p=0.4),
            nn.Linear(in_features=60, out_features=1, bias=True))

    elif args.model == 'pnasv2':
        _, global_params = get_model_params('efficientnet-b1', None)
        model = pretrainedmodels.__dict__['pnasnet5large'](
            num_classes=1000, pretrained='imagenet')
        model.avg_pool = nn.Sequential(
            MBConvBlock(
                BlockArgs(kernel_size=3,
                          num_repeat=3,
                          input_filters=4320,
                          output_filters=2160,
                          expand_ratio=3,
                          id_skip=True,
                          stride=[2],
                          se_ratio=0.25), global_params),
            nn.AdaptiveAvgPool2d(1))
        model.last_linear = nn.Sequential(
            nn.BatchNorm1d(2160),
            nn.Dropout(p=0.25),
            nn.Linear(in_features=2160, out_features=400, bias=True),
            nn.ReLU(),
            nn.BatchNorm1d(400),
            nn.Dropout(p=0.25),
            nn.Linear(in_features=400, out_features=5, bias=True),
        )

    elif args.model in pretrainedmodels.__dict__.keys():
        model = pretrainedmodels.__dict__[args.model](num_classes=1000,
                                                      pretrained='imagenet')
        model.avg_pool = nn.AdaptiveAvgPool2d(1)
        model.last_linear = nn.Sequential(
            nn.BatchNorm1d(4320),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=4320, out_features=600, bias=True),
            nn.ReLU(),
            nn.BatchNorm1d(600),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=600, out_features=100, bias=True),
            nn.ReLU(),
            nn.BatchNorm1d(100),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=100, out_features=5, bias=True),
        )
    elif args.model == 'nasnetv2':
        model = nasnetv2()

    #print(model)
    model = model.to(device)
    if torch.cuda.is_available():
        model = nn.DataParallel(model)
        cudnn.benchmark = True
    if args.checkpoint:
        print('Resuming training from epoch {}, loading {}...'.format(
            args.resume, args.checkpoint))
        weight_file = os.path.join(args.root, args.checkpoint)
        model.load_state_dict(
            torch.load(weight_file, map_location=lambda storage, loc: storage))

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=args.weight_decay)
    scheduler = MultiStepLR(
        optimizer,
        milestones=(np.array([1 / 3, 1 / 2, 2 / 3, 5 / 6]) *
                    args.epochs).astype(int).tolist(),
        gamma=0.1)

    train_csv = os.path.join(args.root, 'train.csv')
    #dist= df.groupby('diagnosis').count().values.reshape(5)
    df1 = pd.read_csv(train_csv,
                      header=1,
                      names=['id', 'diagnosis'],
                      dtype={
                          'id': str,
                          'diagnosis': np.int8
                      })
    df1['dataset'] = 0
    df1, df_val = \
        train_test_split(df1, test_size=0.05, random_state=40)

    print('Current Competition:')
    print(df1.groupby('diagnosis').count())

    #Previous dataset
    ext_csv = os.path.join(args.root, 'exter-resized',
                           'trainLabels_cropped.csv')
    df2 = pd.read_csv(ext_csv,
                      header=1,
                      names=['id', 'diagnosis'],
                      usecols=[2, 3],
                      dtype={
                          'id': str,
                          'diagnosis': np.int8
                      })
    df2['diagnosis'] = df2['diagnosis'].astype(int)
    df2['dataset'] = 1
    print('Previous Dataset:')
    print(df2.groupby('diagnosis').count())

    #IEEE
    df3 = pd.read_csv('IEEE/label/train.csv',
                      header=1,
                      names=['id', 'diagnosis'],
                      usecols=[0, 1],
                      dtype={
                          'id': str,
                          'diagnosis': np.int8
                      })
    df3 = df3.append(
        pd.read_csv('IEEE/label/test.csv',
                    header=1,
                    names=['id', 'diagnosis'],
                    usecols=[0, 1],
                    dtype={
                        'id': str,
                        'diagnosis': np.int8
                    }))
    df3['dataset'] = 2
    print('IEEE')
    print(df3.groupby('diagnosis').count())

    #messidor
    '''
    df4=pd.DataFrame()
    for i in range(1,4):
        for j in range(1,5):
            df4=df4.append(pd.read_excel(
                    'messidor/Annotation_Base'+str(i)+str(j)+'.xls',header=1,
                    names =['id', 'diagnosis'], usecols=[0,2], 
                    dtype={'id':str, 'diagnosis':np.int8}))
    df4['dataset'] = 3
    print('Messidor:')
    print(df4.groupby('diagnosis').count())
    '''

    print('Overall val:')
    print(df_val.groupby('diagnosis').count())

    for i in range(args.resume):
        scheduler.step()

    for epoch in range(args.resume, args.epochs):

        df = pd.DataFrame()
        df = df.append(
            df1.groupby('diagnosis').apply(
                lambda x: x.sample(200, replace=True)).reset_index(drop=True))
        df = df.append(
            df2.groupby('diagnosis').apply(
                lambda x: x.sample(700, replace=True)).reset_index(drop=True))
        df = df.append(
            df3.groupby('diagnosis').apply(
                lambda x: x.sample(20, replace=True)).reset_index(drop=True))
        print('Overall train:', len(df))
        print(df.groupby('diagnosis').count())

        data = {'train': df, 'val': df_val}
        dataset = {
            x: APTOSDataset(x, data[x], transform[x])
            for x in ['train', 'val']
        }
        dataloader = {
            x: DataLoader(dataset[x],
                          batch_size=args.batch,
                          shuffle=(x == 'train'),
                          num_workers=args.workers,
                          pin_memory=True)
            for x in ['train', 'val']
        }
        print('Epoch {}/{}'.format(epoch + 1, args.epochs))
        print('-' * 10)

        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()
            else:
                model.eval()

            nb = 0
            num = 0
            running_loss = 0
            running_correct = 0
            predict = []
            truth = []

            for inputs, targets in dataloader[phase]:
                t1 = time.time()
                batch = inputs.size(0)
                inputs = inputs.to(device).float()
                targets = targets.to(device).long()
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, targets)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                nb += 1
                num += batch
                loss = loss.item()
                running_loss += loss * inputs.size(0)
                #propose=outputs.round().long().clamp(0,4)
                _, propose = outputs.data.max(1)
                correct = (propose == targets).sum().item()
                acc = correct / batch * 100
                running_correct += correct
                p = propose.cpu().tolist()
                t = targets.cpu().tolist()
                predict += p
                truth += t
                t2 = time.time()
                if nb % args.print == 0:

                    print('|'.join(str(x) for x in p))
                    print('|'.join(str(x) for x in t))
                    print('n:{:d}, l:{:.4f}|{:.4f}, a:{:.4f}|{:.4f}, t:{:.4f}' \
                          .format(num, loss, running_loss/num, acc, running_correct/num*100, t2-t1))

            print('num:', num, len(truth))
            cm = confusion_matrix(truth, predict, labels=[0, 1, 2, 3, 4])
            ht = histogram(truth, 0, 4)
            hp = histogram(predict, 0, 4)
            hm = np.outer(ht, hp) / float(num)  # np.float is removed in modern NumPy
            kappa = cohen_kappa_score(truth, predict, labels=[0, 1, 2, 3, 4])
            kappa2 = quadratic_weighted_kappa(truth, predict, 0, 4)
            print('=' * 5, phase, '=' * 5)
            print("Confusion matrix")
            print(cm)
            print("Hist matrix")
            print(ht)
            print(hp)
            print(hm)
            print('{:s}:{:d}, n:{:d}, l:{:.4f}, a:{:.4f}, k:{:.4f}, k2:{:.4f}, t:{:.4f}' \
                  .format(phase, epoch+1, num, running_loss/num, \
                          running_correct/num*100, kappa, kappa2, t2-t1))

            print('=' * 15)

            if phase == 'val':
                torch.save(
                    model.state_dict(),
                    os.path.join(args.save_folder,
                                 'out_' + str(epoch + 1) + '.pth'))

        print()
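
`histogram` and `quadratic_weighted_kappa` are used above but not shown; below are hedged sketches consistent with how they are called (the project's own versions may differ):

import numpy as np
from sklearn.metrics import cohen_kappa_score

def histogram(values, vmin, vmax):
    # Counts of each integer label in [vmin, vmax].
    return np.bincount(np.asarray(values) - vmin, minlength=vmax - vmin + 1)

def quadratic_weighted_kappa(truth, predict, vmin, vmax):
    # Cohen's kappa with quadratic weights over labels vmin..vmax.
    return cohen_kappa_score(truth, predict, weights='quadratic',
                             labels=list(range(vmin, vmax + 1)))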
Code example #15
import torchvision.transforms as transforms
import PIL

# Define Transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
b0_input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run Evaluation
ImageNet.benchmark(
    model=EfficientNet.from_pretrained(model_name='efficientnet-b0'),
    paper_model_name='EfficientNet-B0',
    paper_arxiv_id='1905.11946',
    paper_pwc_id='efficientnet-rethinking-model-scaling-for',
    input_transform=b0_input_transform,
    batch_size=256,
    num_gpu=1)

# Define Transforms
b1_input_transform = transforms.Compose([
    transforms.Resize(273, PIL.Image.BICUBIC),
    transforms.CenterCrop(240),
    transforms.ToTensor(),
    normalize,
])
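
The per-variant resolutions do not need to be hard-coded; efficientnet_pytorch exposes them (a small sketch):

from efficientnet_pytorch import EfficientNet

for b in range(8):
    name = f'efficientnet-b{b}'
    # Prints 224, 240, 260, 300, 380, 456, 528, 600 for b0..b7.
    print(name, EfficientNet.get_image_size(name))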
Code example #16
def _train(args):
    """
    is_distributed = len(args.hosts) > 1 and args.dist_backend is not None
    logger.debug("Distributed training - {}".format(is_distributed))

    if is_distributed:
        # Initialize the distributed environment.
        world_size = len(args.hosts)
        os.environ['WORLD_SIZE'] = str(world_size)
        host_rank = args.hosts.index(args.current_host)
        os.environ['RANK'] = str(host_rank)
        dist.init_process_group(backend=args.dist_backend, rank=host_rank, world_size=world_size)
        logger.info(
            'Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
                args.dist_backend,
                dist.get_world_size()) + 'Current host rank is {}. Using cuda: {}. Number of gpus: {}'.format(
                dist.get_rank(), torch.cuda.is_available(), args.num_gpus))          
    """

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    logger.info("Device Type: {}".format(device))

    logger.info("Loading dataset from folder")
    img_size = EfficientNet.get_image_size(args.model_name)
    transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    transformaugment = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ColorJitter(brightness=0.5,
                               contrast=0.5,
                               saturation=0.5,
                               hue=0.5),
        transforms.RandomAffine(10, translate=(0.2, 0.2)),
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
        transforms.RandomErasing(value='random')
    ])
    #target_transform = transforms.Compose([transforms.Resize((224,224)),
    #                                       transforms.ToTensor()])

    root = os.path.join(args.data_dir, "train")
    FullDataset = torchvision.datasets.ImageFolder(root,
                                                   transform=None,
                                                   target_transform=None)
    trainset, validset, testset = split_to_3datasets(FullDataset)

    trainset = TransformDataset(trainset, transform=transformaugment)
    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers)

    validset = TransformDataset(validset, transform=transform)
    valid_loader = DataLoader(validset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.workers)

    testset = TransformDataset(testset, transform=transform)
    test_loader = DataLoader(testset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=args.workers)
    """
    root = 'val'
    validset = torchvision.datasets.ImageFolder(root, transform = transform, target_transform = None)
    valid_loader = DataLoader(validset, batch_size=1,
                                              shuffle=False, num_workers=args.workers)
    """
    class_map = FullDataset.classes
    num_classes = len(FullDataset.classes)

    logger.info("Model loaded")
    if (args.conv_type == 'Std'):
        layerdict, offsetdict = None, None
    elif (args.conv_type == 'Equi'):
        layerdict, offsetdict = torch.load('layertrain.pt'), torch.load(
            'offsettrain.pt')
    model = EfficientNet.from_pretrained(args.model_name,
                                         conv_type=args.conv_type,
                                         layerdict=layerdict,
                                         offsetdict=offsetdict)
    for param in model.parameters():
        param.requires_grad = False
    num_features = model._fc.in_features
    model._fc = nn.Linear(num_features, num_classes)

    if torch.cuda.device_count() > 1:
        logger.info("Gpu count: {}".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    model = model.to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum)
    writer = SummaryWriter(
        log_dir="runs/{}".format(args.logdir),
        comment="visualising losses of training and validation")

    for epoch in range(1, args.epochs + 1):
        epochtime1 = time.time()
        # training phase
        running_loss = 0.0
        running_acc = 0.0
        for i, data in enumerate(train_loader):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            model.train()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            _, preds = torch.max(outputs, 1)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            running_acc += torch.sum(preds == labels.data)
            """
            if i % args.batch_size == args.batch_size-1:  # print every args.batch_size mini-batches
                print('[%d, %5d] loss: %.3f Acc: %.3f' %
                      (epoch, i + 1, running_loss / args.batch_size, running_acc / args.batch_size))
                running_loss = 0.0
                running_acc = 0.0
            """
        epoch_loss = running_loss / len(trainset)
        epoch_acc = running_acc.double() / len(trainset)
        #print("train loss: %.3f train Acc: %.3f" %(epoch_loss, epoch_acc))
        writer.add_scalar("training_loss", epoch_loss, epoch)
        writer.add_scalar("training_acc", epoch_acc, epoch)
        # validation phase
        if (epoch % 1 == 0):
            with torch.no_grad():
                running_loss = 0.0
                running_acc = 0.0
                for i, data in enumerate(valid_loader):
                    # get the inputs
                    inputs, labels = data
                    inputs, labels = inputs.to(device), labels.to(device)
                    model.eval()
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)

                    # print statistics
                    running_loss += loss.item()
                    running_acc += torch.sum(preds == labels.data)
                    """
                    if i % 1 == 0:  # print every 1 mini-batches
                        print('[%d, %5d] loss: %.3f Acc: %.3f' %
                        (epoch, i + 1, running_loss / 1, running_acc / 1))
                        running_loss = 0.0
                        running_acc = 0.0
                    """
                epoch_loss = running_loss / len(validset)
                epoch_acc = running_acc.double() / len(validset)
                #print("validation loss: %.3f validation Acc: %.3f" %(epoch_loss, epoch_acc))
                writer.add_scalar("validation_loss", epoch_loss, epoch)
                writer.add_scalar("validation_acc", epoch_acc, epoch)
        epochtime2 = time.time()
    epochdiff = epochtime2 - epochtime1
    writer.close()
    print("time for 1 complete epoch: ", epochdiff)
    print('Finished Training')
    """
    answer = input("Do you want to run inference on testset (y/n) ? ")
    if answer =='y':
        with torch.no_grad():
            for i,data in enumerate(test_loader):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                model.eval()
                outputs = model(inputs)
                preds = torch.topk(outputs, k=num_classes).indices.squeeze(0).tolist()        
                print('-----')
                for idx in preds:
                    category = class_map[idx]
                    prob = torch.softmax(outputs, dim=1)[0, idx].item()
                    print('{:<75} ({:.2f}%)'.format(category, prob*100))
    """

    return _save_model(model, args.model_dir)
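
`split_to_3datasets` and `TransformDataset` are used above but not defined here; these are hedged sketches consistent with their call sites (the split fractions are assumptions):

import torch
from torch.utils.data import Dataset, random_split

def split_to_3datasets(dataset, fractions=(0.8, 0.1, 0.1)):
    # Random train/valid/test split; the real fractions may differ.
    n = len(dataset)
    n_train, n_valid = int(fractions[0] * n), int(fractions[1] * n)
    return random_split(dataset, [n_train, n_valid, n - n_train - n_valid])

class TransformDataset(Dataset):
    # Apply a transform lazily, so one underlying ImageFolder can feed
    # differently augmented splits.
    def __init__(self, dataset, transform=None):
        self.dataset, self.transform = dataset, transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        img, label = self.dataset[idx]
        return (self.transform(img) if self.transform else img), label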
Code example #17
def load_weight(net, load_path, Parallel=True):
    state = torch.load(load_path)

    # Strip the 'module.' prefix that nn.DataParallel prepends to parameter
    # names when the checkpoint was saved from a wrapped model.
    new_state = {}
    for key, value in state.items():
        new_state[key[7:]] = value
    net.load_state_dict(new_state)

    if Parallel:
        net = torch.nn.DataParallel(net).to('cuda')

    return net


net = EfficientNet.from_pretrained("efficientnet-b0", num_classes=2)

net = load_weight(
    net,
    "log/PAIP2020_MSI/efficientnet-b0_MSI_1_all_data/model_net_{0}.pth".format(
        712 * 1), False).cuda()
net.eval()
with open("validation_MSI.csv", 'w', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, ["WSI_ID", "MSI-H"])
    writer.writeheader()
    for i in range(1, 32):
        print(i)
        file = "data/wsi_5x_patchs/{0:02d}".format(i)

        num_positive = 0
        num_negative = 0
Code example #18
print(blocks_args)
#[BlockArgs(num_repeat=1, kernel_size=3, stride=[1], expand_ratio=1, input_filters=32, output_filters=16, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=2, kernel_size=3, stride=[2], expand_ratio=6, input_filters=16, output_filters=24, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=2, kernel_size=5, stride=[2], expand_ratio=6, input_filters=24, output_filters=40, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=3, kernel_size=3, stride=[2], expand_ratio=6, input_filters=40, output_filters=80, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=3, kernel_size=5, stride=[1], expand_ratio=6, input_filters=80, output_filters=112, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=4, kernel_size=5, stride=[2], expand_ratio=6, input_filters=112, output_filters=192, se_ratio=0.25, id_skip=True), 
# BlockArgs(num_repeat=1, kernel_size=3, stride=[1], expand_ratio=6, input_filters=192, output_filters=320, se_ratio=0.25, id_skip=True)]
print(global_params)


from efficientnet_pytorch.model import EfficientNet
net = EfficientNet(blocks_args, global_params)
img = torch.ones((1, 3, 224, 224))
print(img.shape)
out = net(img)
print(out.shape)  # torch.Size([1, 1000]) with the default 1000-class head


class SimpleNet(nn.Module):
    # Skeleton left unfinished in the original notebook.
    def __init__(self):
        super().__init__()

    def forward(self, x):
        pass