Example #1
0
 def __init__(self, num_classes):
     """Wrap a model-zoo InceptionResNetV2 and attach a fresh classifier.

     Args:
         num_classes: output width of the new fully-connected head.
     """
     super(InceptionResNetV2, self).__init__()
     self.name = "InceptionResNetV2"
     # Backbone from the model zoo; export its normalisation stats to the
     # global config so the input pipeline preprocesses consistently.
     self.model = model_zoo.inceptionresnetv2()
     cfg.mean = self.model.mean
     cfg.std = self.model.std
     # 1536 is the backbone's pooled feature width.
     # NOTE(review): head is stored as `self.fc` — confirm forward() uses
     # self.fc rather than the backbone's own last_linear.
     self.fc = nn.Linear(1536, num_classes)
Example #2
0
def generate_C2D_model(opt):
    """Build the 2-D CNN feature extractor selected by ``opt.c2d_model_name``.

    Args:
        opt: options object providing ``c2d_model_name``, ``no_cuda`` and
            ``device`` attributes.

    Returns:
        Tuple ``(load_image_fn, model, (C, H, W))`` — a preprocessing
        callable, the headless model, and its expected input shape.

    Raises:
        ValueError: if ``opt.c2d_model_name`` is not a supported architecture
            (previously this fell through and crashed with a NameError).
    """
    if opt.c2d_model_name == 'inception_v3':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionv3(num_classes=1000, pretrained='imagenet')
    elif opt.c2d_model_name == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(num_classes=1000, pretrained='imagenet')
    elif opt.c2d_model_name == 'inception_v4':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionv4(num_classes=1000, pretrained='imagenet')
    elif opt.c2d_model_name == 'inceptionresnetv2':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionresnetv2(num_classes=1000, pretrained='imagenet')
    else:
        raise ValueError('unsupported c2d model: %s' % opt.c2d_model_name)

    # Identical in every branch, so built once here.
    load_image_fn = utils.LoadTransformImage(model)

    # Strip the classification head so the network emits pooled features.
    model.last_linear = utils.Identity()

    if not opt.no_cuda:
        model = model.to(opt.device)

    return load_image_fn, model, (C, H, W)
Example #3
0
File: net.py Project: youngyzzZ/PAF
    def __init__(self, num_classes=1001):
        """Build InceptionResNetV2, load ImageNet weights, replace the head.

        Args:
            num_classes: output size of the new ``last_linear`` layer
                (default 1001 — presumably ImageNet + background; confirm).
        """
        super(InceptionResNetV2, self).__init__()
        # Modules
        # Stem: 3-channel input -> 192 channels before the Inception blocks.
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32,
                                     64,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        # Inception-ResNet block stacks with their residual scaling factors.
        self.mixed_5b = Mixed_5b()
        self.repeat = self._make_layer(Block35, scale=0.17, blocks=10)
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = self._make_layer(Block17, scale=0.10, blocks=20)
        self.mixed_7a = Mixed_7a()
        self.repeat_2 = self._make_layer(Block8, scale=0.20, blocks=9)
        self.block8 = Block8(noReLU=True)
        self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
        self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)

        # load pre_train model
        pre_model = pretrainedmodels.inceptionresnetv2(num_classes=1000,
                                                       pretrained='imagenet')
        # Temporarily adopt the pretrained head so both state dicts have the
        # same keys/shapes, then copy all pretrained weights into this module.
        self.last_linear = pre_model.last_linear
        self.load_state_dict(pre_model.state_dict())

        # Swap in a freshly initialised classifier for `num_classes` outputs.
        self.last_linear = nn.Linear(1536, num_classes)
        init_params(self.last_linear)
Example #4
0
 def __init__(self):
     """Adapt a pretrained InceptionResNetV2 to single-channel input.

     The input is first padded by 24 pixels on each side with fill value -1,
     then fed to a backbone whose stem conv accepts one channel.
     """
     super().__init__()
     self.pad = torch.nn.ConstantPad2d(24, -1)
     self.base_model = pretrainedmodels.inceptionresnetv2()
     # Replace the 3-channel stem conv with a 1-channel one; this layer is
     # randomly initialised (its pretrained weights are discarded).
     self.base_model.conv2d_1a.conv = nn.Conv2d(1,
                                                32,
                                                kernel_size=3,
                                                stride=2,
                                                padding=1,
                                                bias=False)
Example #5
0
    def __init__(self, n_class=10, pretrained='imagenet'):
        """Frozen InceptionResNetV2 feature extractor with a new classifier.

        Args:
            n_class: number of output classes for the final linear layer.
            pretrained: weight-set name forwarded to ``pretrainedmodels``
                (e.g. ``'imagenet'``) or ``None`` for random initialisation.
        """
        super(InceptionResnetv2, self).__init__()
        # Bug fix: the factory's keyword is `num_classes`; passing
        # `n_class=1000` raised a TypeError.
        model = pretrainedmodels.inceptionresnetv2(num_classes=1000,
                                                   pretrained=pretrained)
        self.features = model.features
        self.avgpool = model.avgpool_1a
        # Freeze the backbone so only the new classifier trains.
        for child in self.features.children():
            for p in child.parameters():
                p.requires_grad = False

        # Bug fix: InceptionResNetV2's pooled feature width is 1536 (output
        # of conv2d_7b), not 2048; a 2048-wide head crashes at forward time.
        self.fc = nn.Linear(1536, n_class)
Example #6
0
 def __init__(self, num_classs=100):
     """Truncate a pretrained InceptionResNetV2 and add a new classifier.

     Args:
         num_classs: number of output classes for the new linear head.
     """
     super(Modified_InceptionResnetV2, self).__init__()
     model = pretrainedmodels.inceptionresnetv2(num_classes=1000, pretrained='imagenet')
     self.num_classs = num_classs
     # Keep the first 16 child modules (everything before the original
     # classifier) as the feature extractor.
     temp = [m for i, m in enumerate(model.children()) if i <= 15]
     self.features = nn.Sequential(*temp)
     # Create the head once; previously it was rebuilt inside the loop for
     # every discarded child module.
     self.classifier = nn.Linear(in_features=1536, out_features=num_classs)
Example #7
0
def test_inceptionresnetv2_model(input_var, pool, assert_equal_outputs):
    """Verify the finetune wrapper matches the reference pretrained model."""
    reference = pretrainedmodels.inceptionresnetv2(num_classes=1000,
                                                   pretrained='imagenet')
    wrapper = make_model(
        'inceptionresnetv2',
        num_classes=1000,
        pool=pool,
        pretrained=True,
    )
    # Align the classifier weights so both networks are numerically equal.
    copy_module_weights(reference.last_linear, wrapper._classifier)
    assert_equal_outputs(input_var, reference, wrapper)
Example #8
0
 def __init__(self,
              input_size=128,
              num_classes=340,
              pretrained='imagenet',
              dropout=0.):
     """InceptionResNetV2 backbone with pooling sized for ``input_size``.

     Args:
         input_size: square input resolution; the average-pool kernel is
             input_size // 64 — assumes the backbone downsamples ~64x,
             TODO confirm for sizes that are not multiples of 64.
         num_classes: output dimension of the final linear layer.
         pretrained: weight set forwarded to ``inceptionresnetv2``.
         dropout: dropout probability applied before the classifier.
     """
     super().__init__()
     self.model = inceptionresnetv2(num_classes=1000, pretrained=pretrained)
     self.features = self.model.features
     self.relu = nn.ReLU()
     self.avg_pool = nn.AvgPool2d(input_size // 64, stride=1, padding=0)
     self.dropout = nn.Dropout(p=dropout)
     # 1536 is the backbone's final feature width.
     self.last_linear = nn.Linear(1536, num_classes)
Example #9
0
def nets(model, num_class):
    """Create a classification network with a ``num_class``-way head.

    Args:
        model: architecture key — one of 'inceptionv4', 'senet154',
            'pnasnet', 'xception', 'incepresv2', 'resnet152', 'se_resxt101',
            'nasnet', 'dpn', 'resnext101'.
        num_class: number of output classes.

    Returns:
        The model with its final classifier replaced.

    Raises:
        ValueError: for an unknown key (previously the function silently
            returned None, deferring the failure to the caller).
    """
    if model == 'inceptionv4':
        model = ptm.inceptionv4(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'senet154':
        model = ptm.senet154(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'pnasnet':
        model = ptm.pnasnet5large(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'xception':
        model = ptm.xception(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'incepresv2':
        model = ptm.inceptionresnetv2(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'resnet152':
        model = models.resnet152(pretrained=True)
        model.fc = nn.Linear(2048, num_class)
        return model

    if model == 'se_resxt101':
        model = ptm.se_resnext101_32x4d(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'nasnet':
        model = ptm.nasnetalarge(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, num_class)
        return model

    if model == 'dpn':  # 224 input size
        model = ptm.dpn107(num_classes=1000, pretrained='imagenet+5k')
        # DPN's classifier is a 1x1 conv rather than a linear layer.
        model.last_linear = nn.Conv2d(model.last_linear.in_channels, num_class,
                                      kernel_size=1, bias=True)
        return model

    if model == 'resnext101':  # 320 input size
        model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x16d_wsl')
        model.fc = nn.Linear(2048, num_class)
        return model

    raise ValueError('unknown model key: {}'.format(model))
    def __init__(self, norm_layer, num_filters):
        """Creates an `FPN` instance for feature extraction.

        Args:
          norm_layer: normalisation layer factory (e.g. ``nn.BatchNorm2d``)
            used inside the top-down refinement blocks.
          num_filters: the number of filters in each output pyramid level.

        The InceptionResNetV2 backbone is loaded with ImageNet weights and
        frozen; only the lateral and top-down layers remain trainable.
        """

        super().__init__()
        self.inception = inceptionresnetv2(num_classes=1000,
                                           pretrained='imagenet')

        # Encoder stages sliced out of the backbone; trailing comments give
        # each stage's output channel count.
        self.enc0 = self.inception.conv2d_1a
        self.enc1 = nn.Sequential(
            self.inception.conv2d_2a,
            self.inception.conv2d_2b,
            self.inception.maxpool_3a,
        )  # 64
        self.enc2 = nn.Sequential(
            self.inception.conv2d_3b,
            self.inception.conv2d_4a,
            self.inception.maxpool_5a,
        )  # 192
        self.enc3 = nn.Sequential(
            self.inception.mixed_5b,
            self.inception.repeat,
            self.inception.mixed_6a,
        )  # 1088
        self.enc4 = nn.Sequential(
            self.inception.repeat_1,
            self.inception.mixed_7a,
        )  #2080
        # Top-down refinement blocks (conv -> norm -> ReLU).
        self.td1 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))
        self.td2 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))
        self.td3 = nn.Sequential(
            nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
            norm_layer(num_filters), nn.ReLU(inplace=True))
        self.pad = nn.ReflectionPad2d(1)
        # 1x1 lateral convs project each encoder stage to num_filters.
        self.lateral4 = nn.Conv2d(2080, num_filters, kernel_size=1, bias=False)
        self.lateral3 = nn.Conv2d(1088, num_filters, kernel_size=1, bias=False)
        self.lateral2 = nn.Conv2d(192, num_filters, kernel_size=1, bias=False)
        self.lateral1 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
        self.lateral0 = nn.Conv2d(32,
                                  num_filters // 2,
                                  kernel_size=1,
                                  bias=False)

        # Freeze the backbone; only FPN-specific layers are trained.
        for param in self.inception.parameters():
            param.requires_grad = False
Example #11
0
def inceptionresnetv2(
        input_size=(3, 299, 299), num_classes=1000, pretrained=None):
    """Build an InceptionResNetV2 adapted to an arbitrary input size.

    Swaps the stem convolution when ``input_size`` differs from the default,
    then pushes a dummy tensor through the feature stack to size the final
    average pool and the classifier's input width.

    Args:
        input_size: (C, H, W) of the expected input. The spatial size must
            be at least about 97x97 so every stride/pool stage keeps a
            positive feature map.
        num_classes: output dimension of the rebuilt ``last_linear``.
        pretrained: weight set forwarded to the underlying model factory.

    Returns:
        The adapted model.
    """
    model = models.inceptionresnetv2(num_classes=1000, pretrained=pretrained)
    if input_size != (3, 299, 299):
        # Rebuild the stem conv for the requested channel count; this layer
        # is randomly initialised.
        model.conv2d_1a.conv = nn.Conv2d(input_size[0],
                                         32,
                                         kernel_size=(3, 3),
                                         stride=(2, 2),
                                         bias=False)
        model.input_size = input_size
    # calculate last avgpool_1a kernel size by tracing the feature stack
    # with a dummy batch of the requested shape.
    test_tensor = torch.randn(1, input_size[0], input_size[1], input_size[2])
    x = model.conv2d_1a(test_tensor)
    x = model.conv2d_2a(x)
    x = model.conv2d_2b(x)
    x = model.maxpool_3a(x)
    x = model.conv2d_3b(x)
    x = model.conv2d_4a(x)
    x = model.maxpool_5a(x)
    x = model.mixed_5b(x)
    x = model.repeat(x)
    x = model.mixed_6a(x)
    x = model.repeat_1(x)
    x = model.mixed_7a(x)
    x = model.repeat_2(x)
    x = model.block8(x)
    x = model.conv2d_7b(x)

    # Pool over whatever spatial extent remains, then size the classifier
    # from the flattened feature count.
    model.avgpool_1a = nn.AvgPool2d(kernel_size=(x.shape[2], x.shape[3]),
                                    padding=0)
    x = model.avgpool_1a(x)
    x = x.view(x.size(0), -1).shape[1]
    model.last_linear = nn.Linear(in_features=x,
                                  out_features=num_classes,
                                  bias=True)
    return model
Example #12
0
def get_backbone(backbone, pretrained=True):
    """Create a headless encoder for the named backbone.

    Args:
        backbone: architecture key (torchvision resnets, SSL resnexts,
            SE-nets, EfficientNets, or the two Inception variants).
        pretrained: load pretrained weights when True.

    Returns:
        Tuple ``(encoder, in_features)`` — the feature extractor and the
        width of the features it produces.

    Raises:
        ValueError: if ``backbone`` is not recognised.
    """
    if backbone in ['resnext50_32x4d_ssl', 'resnet18_ssl', 'resnet50_ssl', 'resnext101_32x4d_ssl']:
        if pretrained:
            model = torch.hub.load(panda_config.ARCH_TO_PRETRAINED[backbone], backbone)
        else:
            model = getattr(_models, backbone.split('_ssl')[0])(pretrained=pretrained)
        # Drop the final pooling + fc pair; keep everything else.
        encoder = nn.Sequential(*list(model.children())[:-2])
        in_features = model.fc.in_features
    elif backbone in ['resnet18', 'resnet34', 'resnet50']:
        pretrained = 'imagenet' if pretrained else None
        model = getattr(_models, backbone)(pretrained=pretrained)
        in_features = model.fc.in_features
        encoder = nn.Sequential(*list(model.children())[:-2])
    elif backbone in ['se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50', 'se_resnet101', 'se_resnet152']:
        pretrained = 'imagenet' if pretrained else None
        model = getattr(pretrainedmodels, backbone)(pretrained=pretrained)
        encoder = nn.Sequential(*list(model.children())[:-2])
        in_features = model.last_linear.in_features
    elif backbone.startswith('efficientnet'):
        encoder = enet.EfficientNet.from_name(backbone)
        if pretrained:
            encoder.load_state_dict(torch.load(panda_config.ARCH_TO_PRETRAINED[backbone]))
        # Record the fc width before replacing it with an identity.
        in_features = encoder._fc.in_features
        encoder._fc = nn.Identity()
    elif backbone == 'inception_resnet_v2':
        pretrained = 'imagenet' if pretrained else None
        encoder = pretrainedmodels.inceptionresnetv2(pretrained=pretrained)
        in_features = encoder.last_linear.in_features
        encoder.last_linear = nn.Identity()
    elif backbone == 'inception_v4':
        pretrained = 'imagenet' if pretrained else None
        encoder = pretrainedmodels.inceptionv4(pretrained=pretrained)
        in_features = encoder.last_linear.in_features
        encoder.last_linear = nn.Identity()
    else:
        raise ValueError(f'Unrecognized backbone {backbone}')

    return encoder, in_features
def get_inception_resnet_v2_pretrained_model(num_classes):
    """Return an InceptionResNetV2 with adaptive pooling and a new head."""
    model = inceptionresnetv2()
    # Adaptive pooling makes the network agnostic to input resolution.
    model.avgpool_1a = nn.AdaptiveAvgPool2d((1, 1))
    model.last_linear = nn.Linear(1536, num_classes)
    return model
Example #14
0
def InceptionResNetV2(num_classes, pretrained=False):
    """Build an InceptionResNetV2 classifier for ``num_classes`` outputs.

    Args:
        num_classes: size of the new final linear layer.
        pretrained: load ImageNet weights when True.

    Returns:
        The configured model with adaptive average pooling.
    """
    # Bug fix: `model` was only assigned when `pretrained` was True, so
    # calling with pretrained=False crashed with a NameError.
    model = inceptionresnetv2(pretrained='imagenet' if pretrained else None)
    model.avg_pool = nn.AdaptiveAvgPool2d(1)
    model.last_linear = nn.Linear(1536, num_classes)
    return model
    # NOTE(review): fragment of a larger function — its `def` line and the
    # origin of `params` are not visible here; behaviour is kept unchanged.
    # Selects a pretrained CNN by name, strips its classifier, and moves it
    # to the GPU.
    if params['model'] == 'resnet101':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet101(pretrained='imagenet')
    elif params['model'] == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(pretrained='imagenet')
    elif params['model'] == 'resnet18':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet18(pretrained='imagenet')
    elif params['model'] == 'resnet34':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet34(pretrained='imagenet')
    elif params['model'] == 'inceptionresnetv2':
        C, H, W = 3, 299, 299
        # 1001 classes: the 'imagenet+background' weights include an extra
        # background class.
        model = pretrainedmodels.inceptionresnetv2(
            num_classes=1001, pretrained='imagenet+background')
    elif params['model'] == 'googlenet':
        C, H, W = 3, 224, 224
        model = googlenet(pretrained=True)
        print(model)
    else:
        # NOTE(review): only prints — `model` stays undefined and the code
        # below raises NameError for unknown names; confirm if intentional.
        print("doesn't support %s" % (params['model']))

    if params['model'] != 'googlenet':
        load_image_fn = utils.LoadTransformImage(model)
        # Replace the classifier with an identity so features are returned.
        model.last_linear = utils.Identity()
    else:
        load_image_fn = google_load()

    model = model.cuda()
Example #16
0
def run(train_sets, valid_sets, idx, save_dr):
    """Fine-tune InceptionResNetV2 on an image-folder dataset.

    Args:
        train_sets: path to the training ImageFolder root.
        valid_sets: path to the validation ImageFolder root.
        idx: run/class name used for checkpoint and JSON file names.
        save_dr: directory receiving the best checkpoint and state JSON.

    Trains for up to 40 epochs with focal loss and Adam, checkpointing when
    the mean of train and test accuracy improves.
    """
    batch_size = 8
    imagenet_data = ImageFolder(train_sets, transform=data_transforms['train'])
    test_data = ImageFolder(valid_sets, transform=data_transforms['val'])
    data_loader = DataLoader(imagenet_data,
                             batch_size=batch_size,
                             shuffle=True)
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=True)

    cls_num = len(imagenet_data.class_to_idx)
    # Load backbone weights from a local checkpoint, then swap in a head
    # sized for this dataset's class count.
    model = inceptionresnetv2(num_classes=1001, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/inceptionresnetv2-520b38e4.pth'),
        strict=True)
    model.last_linear = nn.Linear(1536, cls_num)
    model.cuda()
    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    #optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'], momentum=state['momentum'],
    #weight_decay=state['decay'], nesterov=True)

    optimizer = torch.optim.Adam(model.parameters(),
                                 state['learning_rate'],
                                 weight_decay=state['decay'],
                                 amsgrad=True)

    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = idx

    state['best_accuracy'] = 0
    # NOTE(review): ReduceLROnPlateau defaults to mode='min' but is stepped
    # with a training *accuracy* below — confirm mode='max' wasn't intended.
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                         factor=0.9,
                                         patience=3)

    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        # One epoch over the training loader; tracks a smoothed loss.
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            data, target = torch.autograd.Variable(
                data.cuda()), torch.autograd.Variable(target.cuda())
            output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            loss = focal_loss(output, target)
            loss.backward()
            optimizer.step()
            # Exponential moving average of the batch loss.
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
            print(correct, len(data_loader.dataset), loss_avg)
        state['train_accuracy'] = correct / len(data_loader.dataset)
        state['train_loss'] = loss_avg

    def test():
        # Full pass over the validation loader without gradients.
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for (data, target) in test_data_loader:

                data, target = torch.autograd.Variable(
                    data.cuda()), torch.autograd.Variable(target.cuda())
                output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
                state['test_loss'] = loss_avg / len(test_data_loader.dataset)
                state['test_accuracy'] = correct / len(
                    test_data_loader.dataset)
            print(state['test_accuracy'])

    best_accuracy = 0.0
    for epoch in range(40):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])
        # Checkpoint criterion: mean of train and test accuracy.
        best_accuracy = (state['train_accuracy'] + state['test_accuracy']) / 2

        if best_accuracy > state['best_accuracy']:
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(), os.path.join(save_dr, idx + '.pth'))
            with open(os.path.join(save_dr, idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])

        # Early exit once validation is perfect (after a minimum of 10 epochs).
        if state['test_accuracy'] == 1 and epoch > 10:
            break
def main(train_root, train_csv, val_root, val_csv, test_root, test_csv,
         epochs, aug, model_name, batch_size, num_workers, val_samples,
         test_samples, early_stopping_patience, limit_data, images_per_epoch,
         split_id, _run):
    """Train and evaluate a melanoma classifier (sacred experiment entry).

    Builds the model named by ``model_name`` with a 2-way head, trains with
    SGD + ReduceLROnPlateau and early stopping on validation AUC, then
    evaluates best/last checkpoints on the test set with and without
    test-time augmentation, appending a summary row to results.csv.

    Args:
        train_root/train_csv, val_root/val_csv, test_root/test_csv: image
            directories and CSV index files for each split.
        epochs: maximum number of training epochs.
        aug: augmentation config dict; 'size'/'mean'/'std' are filled in
            per-model below.
        model_name: one of the architectures asserted below.
        batch_size, num_workers: DataLoader settings.
        val_samples, test_samples: number of augmented samples per image
            for test-time augmentation.
        early_stopping_patience: epochs without val-AUC improvement allowed.
        limit_data: optional random subset size for training data.
        images_per_epoch: optional cap on images seen per epoch.
        split_id: split identifier recorded in the results row.
        _run: sacred run object (experiment name / id).

    Returns:
        Tuple of (no-augmentation test AUC, augmented test AUC) for the
        best checkpoint.
    """
    assert(model_name in ('inceptionv4', 'resnet152', 'densenet161',
                          'senet154', 'pnasnet5large', 'nasnetalarge',
                          'xception', 'squeezenet', 'resnext', 'dpn',
                          'inceptionresnetv2', 'mobilenetv2'))

    cv2.setNumThreads(0)

    AUGMENTED_IMAGES_DIR = os.path.join(fs_observer.dir, 'images')
    CHECKPOINTS_DIR = os.path.join(fs_observer.dir, 'checkpoints')
    BEST_MODEL_PATH = os.path.join(CHECKPOINTS_DIR, 'model_best.pth')
    LAST_MODEL_PATH = os.path.join(CHECKPOINTS_DIR, 'model_last.pth')
    RESULTS_CSV_PATH = os.path.join('results', 'results.csv')
    EXP_NAME = _run.meta_info['options']['--name']
    EXP_ID = _run._id

    for directory in (AUGMENTED_IMAGES_DIR, CHECKPOINTS_DIR):
        os.makedirs(directory)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Per-architecture setup: build the model, replace the classifier with a
    # 2-way head, and record the input size / normalisation stats in `aug`.
    if model_name == 'inceptionv4':
        model = ptm.inceptionv4(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = 299
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'resnet152':
        model = models.resnet152(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 2)
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    elif model_name == 'densenet161':
        model = models.densenet161(pretrained=True)
        model.classifier = nn.Linear(model.classifier.in_features, 2)
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    elif model_name == 'senet154':
        model = ptm.senet154(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'squeezenet':
        model = ptm.squeezenet1_1(num_classes=1000, pretrained='imagenet')
        # SqueezeNet classifies via its final conv layer, not a linear one.
        model.last_conv = nn.Conv2d(
            512, 2, kernel_size=(1, 1), stride=(1, 1))
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'pnasnet5large':
        model = ptm.pnasnet5large(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'nasnetalarge':
        model = ptm.nasnetalarge(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'xception':
        model = ptm.xception(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'dpn':
        model = ptm.dpn131(num_classes=1000, pretrained='imagenet')
        # DPN's classifier is a 1x1 conv rather than a linear layer.
        model.last_linear = nn.Conv2d(model.last_linear.in_channels, 2,
                                      kernel_size=1, bias=True)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'resnext':
        model = ptm.resnext101_64x4d(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'inceptionresnetv2':
        model = ptm.inceptionresnetv2(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'mobilenetv2':
        model = MobileNetV2()
        model.load_state_dict(torch.load('./auglib/models/mobilenet_v2.pth'))
        model.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(model.last_channel, 2),
        )
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    model.to(device)

    augs = Augmentations(**aug)
    model.aug_params = aug

    # Dataset variants: augmented training data, plain val/test, and two
    # extra test views (no augmentation, inception-style crop).
    datasets = {
        'samples': CSVDataset(train_root, train_csv, 'image_id', 'melanoma',
                              transform=augs.tf_augment, add_extension='.jpg',
                              limit=(400, 433)),
        'train': CSVDataset(train_root, train_csv, 'image_id', 'melanoma',
                            transform=augs.tf_transform, add_extension='.jpg',
                            random_subset_size=limit_data),
        'val': CSVDatasetWithName(
            val_root, val_csv, 'image_id', 'melanoma',
            transform=augs.tf_transform, add_extension='.jpg'),
        'test': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.tf_transform, add_extension='.jpg'),
        'test_no_aug': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.no_augmentation, add_extension='.jpg'),
        'test_144': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.inception_crop, add_extension='.jpg'),
    }

    dataloaders = {
        'train': DataLoader(datasets['train'], batch_size=batch_size,
                            shuffle=True, num_workers=num_workers,
                            worker_init_fn=set_seeds),
        'samples': DataLoader(datasets['samples'], batch_size=batch_size,
                              shuffle=False, num_workers=num_workers,
                              worker_init_fn=set_seeds),
    }

    # Save a grid of augmented samples for visual inspection.
    save_images(datasets['samples'], to=AUGMENTED_IMAGES_DIR, n=32)
    sample_batch, _ = next(iter(dataloaders['samples']))
    save_image(make_grid(sample_batch, padding=0),
               os.path.join(AUGMENTED_IMAGES_DIR, 'grid.jpg'))

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(model.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.001)

    # LR decays when validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1,
                                                     min_lr=1e-5,
                                                     patience=8)
    metrics = {
        'train': pd.DataFrame(columns=['epoch', 'loss', 'acc', 'auc']),
        'val': pd.DataFrame(columns=['epoch', 'loss', 'acc', 'auc'])
    }

    best_val_auc = 0.0
    best_epoch = 0
    epochs_without_improvement = 0
    if images_per_epoch:
        batches_per_epoch = images_per_epoch // batch_size
    else:
        batches_per_epoch = None

    # Training loop with early stopping on validation AUC.
    for epoch in range(epochs):
        print('train epoch {}/{}'.format(epoch+1, epochs))
        epoch_train_result = train_epoch(
            device, model, dataloaders, criterion, optimizer,
            batches_per_epoch)

        # NOTE(review): DataFrame.append was removed in pandas 2.0; this
        # code assumes pandas < 2.
        metrics['train'] = metrics['train'].append(
            {**epoch_train_result, 'epoch': epoch}, ignore_index=True)
        print('train', epoch_train_result)

        epoch_val_result, _ = test_with_augmentation(
            model, datasets['val'], device, num_workers, val_samples)

        metrics['val'] = metrics['val'].append(
            {**epoch_val_result, 'epoch': epoch}, ignore_index=True)
        print('val', epoch_val_result)
        print('-' * 40)

        scheduler.step(epoch_val_result['loss'])

        if epoch_val_result['auc'] > best_val_auc:
            best_val_auc = epoch_val_result['auc']
            best_val_result = epoch_val_result
            best_epoch = epoch
            epochs_without_improvement = 0
            torch.save(model, BEST_MODEL_PATH)
        else:
            epochs_without_improvement += 1

        if epochs_without_improvement > early_stopping_patience:
            last_val_result = epoch_val_result
            torch.save(model, LAST_MODEL_PATH)
            break

        if epoch == (epochs-1):
            last_val_result = epoch_val_result
            torch.save(model, LAST_MODEL_PATH)

    # Persist per-epoch metrics.
    for phase in ['train', 'val']:
        metrics[phase].epoch = metrics[phase].epoch.astype(int)
        metrics[phase].to_csv(os.path.join(fs_observer.dir, phase + '.csv'),
                              index=False)

    # Run testing
    # TODO: reduce code repetition
    test_result, preds = test_with_augmentation(
        torch.load(BEST_MODEL_PATH), datasets['test'], device,
        num_workers, test_samples)
    print('[best] test', test_result)

    test_noaug_result, preds_noaug = test_with_augmentation(
        torch.load(BEST_MODEL_PATH), datasets['test_no_aug'], device,
        num_workers, 1)
    print('[best] test (no augmentation)', test_noaug_result)

    test_result_last, preds_last = test_with_augmentation(
        torch.load(LAST_MODEL_PATH), datasets['test'], device,
        num_workers, test_samples)
    print('[last] test', test_result_last)

    test_noaug_result_last, preds_noaug_last = test_with_augmentation(
        torch.load(LAST_MODEL_PATH), datasets['test_no_aug'], device,
        num_workers, 1)
    print('[last] test (no augmentation)', test_noaug_result_last)

    # Save predictions
    preds.to_csv(os.path.join(fs_observer.dir, 'test-aug-best.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_noaug.to_csv(os.path.join(fs_observer.dir, 'test-noaug-best.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_last.to_csv(os.path.join(fs_observer.dir, 'test-aug-last.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_noaug_last.to_csv(os.path.join(fs_observer.dir, 'test-noaug-last.csv'),
                 index=False, columns=['image', 'label', 'score'])

    # TODO: Avoid repetition.
    #       use ordereddict, or create a pandas df before saving
    with open(RESULTS_CSV_PATH, 'a') as file:
        file.write(','.join((
            EXP_NAME,
            str(EXP_ID),
            str(split_id),
            str(best_epoch),
            str(best_val_result['loss']),
            str(best_val_result['acc']),
            str(best_val_result['auc']),
            str(best_val_result['avp']),
            str(best_val_result['sens']),
            str(best_val_result['spec']),
            str(last_val_result['loss']),
            str(last_val_result['acc']),
            str(last_val_result['auc']),
            str(last_val_result['avp']),
            str(last_val_result['sens']),
            str(last_val_result['spec']),
            str(best_val_auc),
            str(test_result['auc']),
            str(test_result_last['auc']),
            str(test_result['acc']),
            str(test_result_last['acc']),
            str(test_result['spec']),
            str(test_result_last['spec']),
            str(test_result['sens']),
            str(test_result_last['sens']),
            str(test_result['avp']),
            str(test_result_last['avp']),
            str(test_noaug_result['auc']),
            str(test_noaug_result_last['auc']),
            str(test_noaug_result['acc']),
            str(test_noaug_result_last['acc']),
            str(test_noaug_result['spec']),
            str(test_noaug_result_last['spec']),
            str(test_noaug_result['sens']),
            str(test_noaug_result_last['sens']),
            str(test_noaug_result['avp']),
            str(test_noaug_result_last['avp']),
            )) + '\n')

    return (test_noaug_result['auc'],
            test_result['auc'],
            )
Example #18
0
    def __init__(self,
                 config_file: Optional[str] = None,
                 override_list: List[Any] = []):
        """Build the experiment configuration tree.

        Defaults defined below can be overridden first by a YAML file and
        then by a flat key/value list; the result is frozen (immutable).

        Args:
            config_file: optional path to a YAML config merged over defaults.
            override_list: flat [key, value, ...] list merged last.
                NOTE(review): mutable default argument — safe only because
                it is never mutated here.
        """
        _C = CN()
        # Fixed validation image file names.
        _C.VALID_IMAGES = [
            'CXR1576_IM-0375-2001.png', 'CXR1581_IM-0378-2001.png',
            'CXR3177_IM-1497-2001.png', 'CXR2585_IM-1082-1001.png',
            'CXR1125_IM-0082-1001.png', 'CXR3_IM-1384-2001.png',
            'CXR1565_IM-0368-1001.png', 'CXR1105_IM-0072-1001-0001.png',
            'CXR2874_IM-1280-1001.png', 'CXR1886_IM-0574-1001.png'
        ]

        # Backbone registry: name -> (model instance, feature size, input
        # resolution). Note the models are instantiated eagerly here.
        _C.MODELS = [{
            'resnet18': (pretrainedmodels.resnet18(pretrained=None), 512, 224),
            'resnet50':
            (pretrainedmodels.resnet50(pretrained=None), 2048, 224),
            'resnet101':
            (pretrainedmodels.resnet101(pretrained=None), 2048, 224),
            'resnet152':
            (pretrainedmodels.resnet152(pretrained=None), 2048, 224),
            'inception_resnet_v2':
            (pretrainedmodels.inceptionresnetv2(pretrained=None), 1536, 299)
        }]

        # _C.MODELS_FEATURE_SIZE = {'resnet18':512, 'resnet50':2048, 'resnet101':2048, 'resnet152':2048,
        #                           'inception_v3':2048, 'inception_resnet_v2':1536}

        # Random seed for NumPy and PyTorch, important for reproducibility.
        _C.RANDOM_SEED = 42
        # Opt level for mixed precision training using NVIDIA Apex. This can be
        # one of {0, 1, 2}. Refer NVIDIA Apex docs for their meaning.
        _C.FP16_OPT = 2

        # Path to the dataset root, which structure as per README. Path is
        # assumed to be relative to project root.
        _C.IMAGE_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Images_2'
        _C.TRAIN_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_train_2.json'
        _C.VAL_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_val_2.json'
        _C.TEST_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_test_2.json'
        _C.PRETRAINED_EMDEDDING = False
        # Path to .vocab file generated by ``sentencepiece``.
        _C.VOCAB_FILE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.vocab"
        # Path to .model file generated by ``sentencepiece``.
        _C.VOCAB_MODEL_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.model"
        _C.VOCAB_SIZE = 3000
        # Training schedule and model hyperparameters.
        _C.EPOCHS = 1024
        _C.BATCH_SIZE = 10
        _C.TEST_BATCH_SIZE = 100
        _C.ITERATIONS_PER_EPOCHS = 1
        _C.WEIGHT_DECAY = 1e-5
        _C.NUM_LABELS = 41
        _C.IMAGE_SIZE = 299
        _C.MAX_SEQUENCE_LENGTH = 130
        _C.DROPOUT_RATE = 0.1
        _C.D_HEAD = 64

        _C.TRAIN_DATASET_LENGTH = 25000
        _C.INFERENCE_TIME = False
        _C.COMBINED_N_LAYERS = 1
        _C.BEAM_SIZE = 50
        # Special token indices for the decoder.
        _C.PADDING_INDEX = 0
        _C.EOS_INDEX = 3
        _C.SOS_INDEX = 2
        _C.USE_BEAM_SEARCH = True
        _C.EXTRACTED_FEATURES = False
        _C.IMAGE_MODEL_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Image_Feature_Extraction/MIMIC_CXR_No_ES/model.pth'

        _C.EMBEDDING_DIM = 8192
        _C.CONTEXT_SIZE = 1024
        _C.LR_COMBINED = 1e-4
        _C.MAX_LR = 1e-1
        _C.SAVED_DATASET = False
        _C.MODEL_NAME = 'inception_resnet_v2'
        # Output locations derived from a single experiment root.
        INIT_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Modified_Transformer/Indiana_15_10_2020_2/'
        _C.SAVED_DATASET_PATH_TRAIN = INIT_PATH + 'DataSet/train_dataloader.pth'
        _C.SAVED_DATASET_PATH_VAL = INIT_PATH + 'DataSet/val_dataloader.pth'
        _C.SAVED_DATASET_PATH_TEST = INIT_PATH + 'DataSet/test_dataloader.pth'

        _C.CHECKPOINT_PATH = INIT_PATH + 'CheckPoints'
        _C.MODEL_PATH = INIT_PATH + 'combined_model.pth'
        _C.MODEL_STATE_DIC = INIT_PATH + 'combined_model_state_dic.pth'
        _C.FIGURE_PATH = INIT_PATH + 'Graphs'
        _C.CSV_PATH = INIT_PATH
        _C.TEST_CSV_PATH = INIT_PATH + 'test_output_image_feature_input.json'
        self._C = _C
        # Precedence: defaults < config_file < override_list.
        if config_file is not None:
            self._C.merge_from_file(config_file)
        self._C.merge_from_list(override_list)

        self.add_derived_params()

        # Make an instantiated object of this class immutable.
        self._C.freeze()
Example #19
0
def create_model(model_key, pretrained, num_of_classes, use_gpu):
    """Create a CNN model and replace its classifier head.

    Args:
        model_key (str): Name of the model to be created.
        pretrained (bool): If True, freeze the backbone so that only the
            newly added last layer is trained (transfer learning).
        num_of_classes (int): Number of categories of outputs.
        use_gpu (bool): If True, move the model to the GPU.

    Returns:
        model_conv: A defined model to be trained.

    Raises:
        ValueError: Error while asking an unrecognized model.
    """
    # Models from the `pretrainedmodels` (Cadene) package expose their
    # classifier as `last_linear`; torchvision models expose it as `fc`.
    cadene_models = ('nasnetalarge', 'inceptionresnetv2', 'inceptionv4')

    # torchvision backbones share the same constructor signature.
    torchvision_factories = {
        'resnet18': torchvision.models.resnet18,
        'resnet34': torchvision.models.resnet34,
        'resnet50': torchvision.models.resnet50,
        'resnet101': torchvision.models.resnet101,
        'inception_v3': torchvision.models.inception_v3,
    }

    if model_key in torchvision_factories:
        model_conv = torchvision_factories[model_key](pretrained=True)
    elif model_key == 'inceptionresnetv2':
        model_conv = pretrainedmodels.inceptionresnetv2(num_classes=1000,
                                                        pretrained='imagenet')
    elif model_key == 'inceptionv4':
        model_conv = pretrainedmodels.inceptionv4(num_classes=1000,
                                                  pretrained='imagenet')
    elif model_key == 'nasnetalarge':
        model_conv = pretrainedmodels.nasnetalarge(num_classes=1000,
                                                   pretrained='imagenet')
    else:
        raise ValueError("Unrecognized name of model {}".format(model_key))

    if pretrained:
        # Lock backbone parameters for transfer learning.
        for param in model_conv.parameters():
            param.requires_grad = False

    # Parameters of newly constructed modules have requires_grad=True by
    # default.  Replace the classifier head and initialize it explicitly.
    # Use the in-place initializers (trailing underscore): the non-underscore
    # forms `nn.init.xavier_uniform` / `nn.init.constant` are deprecated and
    # removed in modern PyTorch.
    if model_key in cadene_models:
        dim_feats = model_conv.last_linear.in_features  # e.g. 1536/2048/4032
        model_conv.last_linear = nn.Linear(dim_feats, num_of_classes)
        nn.init.xavier_uniform_(model_conv.last_linear.weight)
        nn.init.constant_(model_conv.last_linear.bias, 0)
    else:
        num_ftrs = model_conv.fc.in_features
        model_conv.fc = nn.Linear(num_ftrs, num_of_classes)
        nn.init.xavier_uniform_(model_conv.fc.weight)
        nn.init.constant_(model_conv.fc.bias, 0)

    if use_gpu:
        model_conv = model_conv.cuda()

    return model_conv
Example #20
0
 def __init__(self, num_classes, pretrained="imagenet"):
     super().__init__()
     self.net = inceptionresnetv2(pretrained=pretrained)
     self.net.avgpool_1a = AdaptiveConcatPool2d()
     self.net.last_linear = nn.Sequential(Flatten(), SEBlock(1536 * 2),
                                          nn.Linear(1536 * 2, num_classes))
    elif params['model'] == 'resnet152':
        C, H, W = 3, 224, 224
        model = pretrainedmodels.resnet152(num_classes=1000,
                                           pretrained='imagenet')
        load_image_fn = utils.LoadTransformImage(model)

    elif params['model'] == 'inception_v4':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionv4(num_classes=1000,
                                             pretrained='imagenet')
        load_image_fn = utils.LoadTransformImage(model)

    elif params['model'] == 'inceptionresnetv2':
        C, H, W = 3, 299, 299
        model = pretrainedmodels.inceptionresnetv2(num_classes=1000,
                                                   pretrained='imagenet')
        load_image_fn = utils.LoadTransformImage(model)

    else:
        print("doesn't support %s" % (params['model']))

    model.last_linear = utils.Identity()
    model = nn.DataParallel(model)
    model = model.to(args.device)

    extract_feats(params, model, load_image_fn)

# Excerpted for reference (definition of utils.Identity):
# class Identity(nn.Module):
#
#     def __init__(self):
Example #22
0
# Log file base name for this run.
parser.add_argument('--logfile', default='result')
# Index of the current iteration (e.g. cross-validation fold); kept as str.
parser.add_argument('--iterNo', default='1')

args = parser.parse_args()

import os
# Restrict CUDA to the device ids given on the command line.
# NOTE(review): `args.c` is defined by an add_argument call earlier in the
# file (not visible here) — presumably a GPU-id string; confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = args.c

# Select the ImageNet-pretrained backbone requested on the command line.
model = args.model
if model == 'resnet152' or model == 'resnet152_3c':
    original_model = torchvision.models.resnet152(pretrained=True)
elif model == 'resnet50':
    original_model = torchvision.models.resnet50(pretrained=True)
elif model == 'inceptionresnetv2':
    # Cadene's `pretrainedmodels` implementation, not torchvision's inception.
    original_model = pretrainedmodels.inceptionresnetv2(pretrained='imagenet')
    #original_model = torchvision.models.inception_v3(pretrained=True)
elif model == 'densenet161':
    original_model = torchvision.models.densenet161(pretrained=True)
elif model == 'vgg16':
    original_model = torchvision.models.vgg16(pretrained=True)
elif model == 'squeezenet':
    original_model = torchvision.models.squeezenet1_0(pretrained=True)
else:
    # Unsupported backbone name: abort with a non-zero exit status.
    sys.exit(-1)

#for name, param in original_model.named_children():
#   print(name)
# Wrap the backbone with the project's fine-tuning head (defined elsewhere).
net = FineTuneModel(original_model, model)
#for name, param in net.named_children():
#   print(name)
Example #23
0
    def __init__(self, num_filters=256):
        """Creates an `FPN` instance for feature extraction.

        Builds the encoder from a frozen ImageNet-pretrained InceptionResNetV2,
        adds 1x1 lateral projections onto each encoder stage, and attaches
        dilated context-feature-extraction (CFE) branches plus channel-wise
        attention on the two deepest stages.

        Args:
          num_filters: the number of filters in each output pyramid level
          pretrained: use ImageNet pre-trained backbone feature extractor
        """

        super().__init__()
        self.inception = inceptionresnetv2(num_classes=1000,
                                           pretrained='imagenet')
        # NOTE(review): `norm_layer` is constructed but never used in this
        # __init__ — presumably consumed elsewhere in the class; verify.
        norm_layer = functools.partial(nn.InstanceNorm2d,
                                       affine=False,
                                       track_running_stats=True)

        # Encoder stages carved out of the InceptionResNetV2 backbone; the
        # trailing comments give each stage's output channel count (they match
        # the in_channels of the lateral convolutions below).
        self.enc0 = self.inception.conv2d_1a  # 32
        self.enc1 = nn.Sequential(
            self.inception.conv2d_2a,
            self.inception.conv2d_2b,
            self.inception.maxpool_3a,
        )  # 64
        self.enc2 = nn.Sequential(
            self.inception.conv2d_3b,
            self.inception.conv2d_4a,
            self.inception.maxpool_5a,
        )  # 192
        self.enc3 = nn.Sequential(
            self.inception.mixed_5b,
            self.inception.repeat,
            self.inception.mixed_6a,
        )  # 1088
        self.enc4 = nn.Sequential(
            self.inception.repeat_1,
            self.inception.mixed_7a,
        )  # 2080

        self.pad = nn.ReflectionPad2d(1)
        # 1x1 lateral projections from each encoder stage to the pyramid width.
        self.lateral4 = nn.Conv2d(2080, num_filters, kernel_size=1, bias=False)
        self.lateral3 = nn.Conv2d(1088, num_filters, kernel_size=1, bias=False)
        self.lateral2 = nn.Conv2d(192, num_filters, kernel_size=1, bias=False)
        self.lateral1 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
        self.lateral0 = nn.Conv2d(32,
                                  num_filters // 2,
                                  kernel_size=1,
                                  bias=False)

        # CFE branch for stage enc3 (1088 channels in): one 1x1 branch and
        # three 3x3 branches with increasing dilation, 32 channels each; their
        # concatenation gives 128 channels, normalized by bn5.
        # (A cfe2 branch existed previously and was removed.)
        self.conv21 = nn.Conv2d(1088, 32, (1, 1), padding=0)
        self.conv22 = nn.Conv2d(1088, 32, (3, 3), dilation=3, padding=3)
        self.conv23 = nn.Conv2d(1088, 32, (3, 3), dilation=5, padding=5)
        self.conv24 = nn.Conv2d(1088, 32, (3, 3), dilation=7, padding=7)
        self.bn5 = nn.BatchNorm2d(num_features=128, affine=False)
        # CFE branch for stage enc4 (2080 channels in), same layout.
        self.conv25 = nn.Conv2d(2080, 32, (1, 1), padding=0)
        self.conv26 = nn.Conv2d(2080, 32, (3, 3), dilation=3, padding=3)
        self.conv27 = nn.Conv2d(2080, 32, (3, 3), dilation=5, padding=5)
        self.conv28 = nn.Conv2d(2080, 32, (3, 3), dilation=7, padding=7)
        self.bn6 = nn.BatchNorm2d(num_features=128, affine=False)
        # Channel-wise attention over the concatenated CFE outputs.
        # Here 256 = C3 (128) + C4 (128), each 128 = 32+32+32+32; this width
        # is tunable.  (Translated from the original Chinese comment.)
        self.cha_att = ChannelwiseAttention(
            in_channels=256
        )
        # self.linear1 = nn.Linear(256, 56)
        # self.linear2 = nn.Linear(56, 256)
        self.conv29 = nn.Conv2d(256, 256, (1, 1), padding=0)
        self.bn7 = nn.BatchNorm2d(num_features=256, affine=False)
        self.relu = nn.ReLU()
        # Freeze the backbone: only the newly added modules are trained.
        for param in self.inception.parameters():
            param.requires_grad = False
def train(mission_id,
          BATCH_SIZE,
          EPOCH,
          LR,
          LR_DECAY,
          DECAY_EPOCH=(30, 50, 70)):
    """Fine-tune an ImageNet-pretrained InceptionResNetV2 on one mission.

    Legacy Python 2 / old-PyTorch code (bare `print` statements,
    `loss.data[0]`, `scheduler.step()` before the epoch) — kept as-is.

    Args:
        mission_id: key into the module-level `dict_mission` mapping.
        BATCH_SIZE: mini-batch size for both train and validation loaders.
        EPOCH: total number of training epochs.
        LR: initial SGD learning rate.
        LR_DECAY: multiplicative LR factor applied at each decay milestone.
        DECAY_EPOCH: epochs at which the learning rate is decayed.

    Side effects:
        Writes a per-run log file under the mission's log directory and
        saves the best model (by validation accuracy) to the model directory.
    """
    # Hyper Parameter
    DECAY_EPOCH = [int(x) for x in DECAY_EPOCH]
    # mission_id = 1

    # `dict_mission` / `label_count` / data helpers are module-level names
    # defined elsewhere in this file.
    mission_kind = dict_mission[mission_id]
    # Hard-coded machine-local output paths.
    log_dir = '/media/tang/code/tianchi/logs/' + mission_kind
    model_dir = '/media/tang/code/tianchi/models/' + mission_kind

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # Fraction of the data held out for validation.
    split_percent = 0.1
    # NOTE(review): IMAGE_CLASS is computed but unused below — the
    # classifier-head replacement that consumed it is commented out; verify.
    IMAGE_CLASS = label_count[mission_kind]

    log_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    log_txt = mission_kind + ' googlenet {} BZ_{} EPO_{} LR_{} LR_DE_{} DE_EPO_{}'. \
        format(log_time, BATCH_SIZE, EPOCH, LR, LR_DECAY, DECAY_EPOCH)
    log_path = os.path.join(log_dir, log_txt)
    # Binary mode: Python 2 str is bytes, so plain writes below are fine.
    log_txt = open(log_path, 'wb')

    # NOTE(review): torchvision ImageNet statistics; Cadene's
    # inceptionresnetv2 is normally normalized with mean/std 0.5 — confirm
    # this mismatch is intentional.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.RandomRotation(5, resample=False, expand=False,
                                  center=None),
        # transforms.RandomResizedCrop(224, scale=(0.8, 1.0), ratio=(4./3., 3./4.)),
        # transforms.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
        transforms.RandomCrop(299),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    val_transform = transforms.Compose([
        # transforms.CenterCrop(224),
        # transforms.TenCrop()
        transforms.ToTensor(),
        normalize,
    ])

    # Shuffle once, then split into train/val lists.
    all_data_list = load_train_label()[mission_kind]
    np.random.shuffle(all_data_list)
    train_data_list, val_data_list = split_data(all_data_list, split_percent)

    train_data = MyDataset(label_list=train_data_list,
                           transform=train_transform,
                           loader=default_loader)
    val_data = MyDataset(label_list=val_data_list,
                         transform=val_transform,
                         loader=valdata_loader)
    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True,
                                   num_workers=1)
    val_loader = Data.DataLoader(dataset=val_data,
                                 batch_size=BATCH_SIZE,
                                 num_workers=1)
    dataset_sizes = {'train': len(train_data), 'val': len(val_data)}

    print '*********** train: ', mission_id, ':', mission_kind, '*************'
    print mission_kind + ': train pics::', dataset_sizes[
        'train'], 'val pics:', dataset_sizes['val']

    model = pretrainedmodels.inceptionresnetv2(pretrained='imagenet')
    print model

    # model = models.inception_v3(pretrained=True)
    # model.fc = nn.Linear(2048, IMAGE_CLASS)
    # model.AuxLogits.fc = nn.Linear(768, IMAGE_CLASS)

    # model = models.resnet50(pretrained=True)
    # model.fc = nn.Linear(2048, IMAGE_CLASS)

    # model = torch.load('/media/tang/code/data/models/mixup_models/mixup_cub_googlenet_159_0.7948.pkl')

    # print model
    # for i, param in enumerate(model.parameters()):
    #   param.requires_grad = True
    #   print
    # multi gpu
    # model = DataParallel(model)
    model.cuda()

    # Optimize only the classifier
    optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=DECAY_EPOCH,
                                         gamma=LR_DECAY)
    loss_func = torch.nn.CrossEntropyLoss()

    # Checkpoints are saved only once validation accuracy beats this floor.
    TOP_ACC = 0.70

    for epoch in range(1, EPOCH + 1):
        # Pre-1.1 PyTorch convention: step the scheduler at epoch start.
        scheduler.step()
        lr = scheduler.get_lr()

        # train
        # lr = LR * (LR_DECAY ** (epoch // DECAY_EPOCH))
        # for param_group in optimizer.param_groups:
        #     param_group['lr'] = lr
        info = 'Epoch:{}/{} Lr={:.4f} | '.format(epoch, EPOCH, float(lr[0]))

        model.train()
        time_s = time.time()

        running_loss = 0
        running_corrects = 0
        for step, (x, y) in enumerate(train_loader):
            x, y = Variable(x).cuda(), Variable(y).cuda()
            # NOTE(review): the model is unpacked into two outputs here
            # (aux-logits style), but stock pretrainedmodels
            # inceptionresnetv2 returns a single tensor — confirm the model
            # was modified accordingly.
            out1, out2 = model(x)
            loss = loss_func(out1, y) + loss_func(out2, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Predict from the sum of both heads; accumulate epoch stats.
            _, preds = torch.max(out1.data + out2.data, 1)
            running_loss += loss.data[0] * x.size(0)
            running_corrects += torch.sum(preds == y.data)

        use_time = time.time() - time_s
        train_speed = dataset_sizes['train'] // use_time
        epoch_loss = running_loss / dataset_sizes['train']
        epoch_acc = 1.0 * running_corrects / dataset_sizes['train']
        info += '{} Loss:{:.4f} Acc:{:.4f} '.format('train', epoch_loss,
                                                    epoch_acc)
        info += '[{:.1f}mins/{}pics] | '.format(use_time / 60.0, train_speed)

        # val valset
        model.eval()
        time_s = time.time()
        running_loss = 0
        running_corrects = 0

        for step, (x, y) in enumerate(val_loader):
            # x.ivolatile = True
            x, y = Variable(x).cuda(), Variable(y).cuda()
            # In eval mode the model yields a single output.
            out = model(x)
            loss = loss_func(out, y)

            _, preds = torch.max(out.data, 1)
            running_loss += loss.data[0] * x.size(0)
            running_corrects += torch.sum(preds == y.data)

        use_time = time.time() - time_s
        train_speed = dataset_sizes['val'] // use_time
        epoch_loss = running_loss / dataset_sizes['val']
        epoch_acc = 1.0 * running_corrects / dataset_sizes['val']
        info += '{} Loss:{:.4f} Acc:{:.4f} '.format('val', epoch_loss,
                                                    epoch_acc)
        info += '[{:.1f}mins/{}pics]'.format(use_time / 60.0, train_speed)
        print info
        log_txt.write(info + '\n')
        # Save whenever validation accuracy improves on the best seen so far.
        if TOP_ACC < epoch_acc:
            TOP_ACC = epoch_acc
            model_sava_path = os.path.join(
                model_dir,
                mission_kind + '_googlenet_%s_%.4f.pkl' % (epoch, epoch_acc))
            torch.save(model, model_sava_path)
            torch.save(model, os.path.join(model_dir, 'best_model.pkl'))
            acc_info = 'save epoch:{} acc:{}'.format(epoch, epoch_acc)
            print acc_info
            log_txt.write(acc_info + '\n')
    log_txt.close()