コード例 #1
0
ファイル: model.py プロジェクト: vaibhavdih/backprop
    def __init__(self,
                 model_path: str = "efficientnet-b0",
                 init_model=None,
                 **kwargs):
        """Set up an EfficientNet image-classification model.

        Args:
            model_path: EfficientNet variant name (e.g. "efficientnet-b0"),
                forwarded to PathModel as the model path.
            init_model: optional factory used to build the network; defaults
                to EfficientNet_pt.from_pretrained with 1000 output classes.
            **kwargs: forwarded unchanged to PathModel.__init__.
        """
        Finetunable.__init__(self)

        # Native input resolution for the chosen variant.
        self.image_size = EfficientNet_pt.get_image_size(model_path)
        # ImageNet's 1000 categories.
        self.num_classes = 1000

        if init_model is None:
            init_model = partial(EfficientNet_pt.from_pretrained,
                                 num_classes=self.num_classes)

        # Fetch (and locally cache) the ImageNet class-label mapping.
        with open(download(IMAGENET_LABELS_URL, "efficientnet"), "r") as f:
            self.labels = json.load(f)

        # Standard ImageNet eval preprocessing; the convert("RGB") step
        # guards against grayscale/RGBA inputs.
        self.tfms = transforms.Compose([
            transforms.Resize(self.image_size, interpolation=Image.BICUBIC),
            transforms.CenterCrop(self.image_size),
            lambda image: image.convert("RGB"),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

        # PathModel performs the actual model construction via init_model.
        PathModel.__init__(self, model_path, init_model, **kwargs)

        self.name = model_path
        self.description = "EfficientNet is an image classification model that achieves state-of-the-art accuracy while being an order-of-magnitude smaller and faster than previous models. Trained on ImageNet's 1000 categories."
        self.tasks = ["image-classification"]
コード例 #2
0
def main(argv):
    """Parse CLI options, load the requested EfficientNet and start the API.

    Args:
        argv: command-line argument list (typically sys.argv[1:]).

    Side effects: sets the module-level ``model``, ``labels_map``,
    ``image_size`` and ``directory`` globals, then blocks in ``app.run``.
    """
    usage = """ python3 app.py -m <model_type>
for instance :
python3 app.py -m 0 for b0 efficient net
Details about the models are below:
             
Name              # Params   Top-1 Acc.  Pretrained?
efficientnet-b0     5.3M       76.3        ✓
efficientnet-b1     7.8M       78.8        ✓
efficientnet-b2     9.2M       79.8        ✓
efficientnet-b3     12M        81.1        ✓
efficientnet-b4     19M        82.6        ✓
efficientnet-b5     30M        83.3        ✓
efficientnet-b6     43M        84.0        -
efficientnet-b7     66M        84.4        -
"""

    global model, labels_map, image_size, directory

    try:
        # Bug fix: the long option must be "model=" (trailing '=') so that
        # --model accepts a required argument, matching the short form "m:".
        opts, args = getopt.getopt(argv, "hm:", ["help", "model="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)

    for opt, arg in opts:
        print(opt)
        print(arg)
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        elif opt in ("-m", "--model"):
            model_type = int(arg)

    # Directory used for uploaded images.
    directory = 'upload'
    if not os.path.exists(directory):
        os.makedirs(directory)

    try:
        model_name = 'efficientnet-b' + str(model_type)
    except NameError:
        # -m/--model was never supplied, so model_type is unbound.
        print("Error : missing model number")
        print(usage)
        sys.exit(2)

    print("API running with " + model_name)
    model = EfficientNet.from_pretrained(model_name)
    image_size = EfficientNet.get_image_size(model_name)

    # Map class index -> human-readable label for the 1000 ImageNet classes.
    with open('labels_map.txt') as f:
        labels_map = json.load(f)
    labels_map = [labels_map[str(i)] for i in range(1000)]

    model.eval()

    port = 5000
    host = '0.0.0.0'

    app.run(host=host, port=port, threaded=True)
コード例 #3
0
def get_imagenet_validation(args):
    """Build a DataLoader over the ImageNet validation split.

    EfficientNet architectures are evaluated at their native resolution
    with bicubic resizing; every other architecture uses the classic
    256-resize / 224-center-crop pipeline.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if 'efficientnet' in args.arch:
        # Arch names use underscores; the library expects dashes.
        side = EfficientNet.get_image_size(args.arch.replace('_', '-'))
        steps = [
            transforms.Resize(side, interpolation=PIL.Image.BICUBIC),
            transforms.CenterCrop(side),
        ]
    else:
        steps = [transforms.Resize(256), transforms.CenterCrop(224)]

    steps += [transforms.ToTensor(), normalize]
    val_transforms = transforms.Compose(steps)

    val_set = datasets.ImageFolder(
        os.path.join(args.val_dir, 'imagenet', 'val'), val_transforms)
    return torch.utils.data.DataLoader(val_set,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=args.workers,
                                       pin_memory=True)
コード例 #4
0
def load_efficientnet(saved_model_path):
    """Restore a fine-tuned 10-class EfficientNet-b5 classifier from disk.

    Returns:
        (model on the selected device, native input size, torch.device).
    """
    variant = "efficientnet-b5"
    classifier = EfficientNetClassifier(n_classes=10)
    size = EfficientNet.get_image_size(variant)

    # Prefer the first CUDA device when available.
    target = "cuda:0" if torch.cuda.is_available() else "cpu"
    device = torch.device(target)
    classifier = classifier.to(device)
    classifier.load_state_dict(torch.load(saved_model_path))
    return classifier, size, device
コード例 #5
0
def save_efficientnet_from_pytorch(model_name, model_path):
    """Export a pretrained EfficientNet to an ONNX file.

    Args:
        model_name: variant name, e.g. "efficientnet-b0".
        model_path: destination path of the ONNX file.
    """
    image_size = EfficientNet.get_image_size(model_name)
    # Load model
    model = EfficientNet.from_pretrained(model_name)
    # Plain swish (x * sigmoid(x)) instead of the memory-efficient custom
    # autograd op, which cannot be traced by the ONNX exporter.
    model.set_swish(memory_efficient=False)
    model.eval()
    # Bug fix: the dummy input must match the variant's native resolution;
    # it was previously hard-coded to 224 regardless of model_name.
    dummy_input = torch.randn(1, 3, image_size, image_size)
    # Export with ONNX
    torch.onnx.export(model, dummy_input, model_path, verbose=True)
コード例 #6
0
def make_data():
    """Build a fastai DataBunch for training, normalized to ImageNet stats.

    Images are squished to the EfficientNet variant's native size and the
    training split is augmented with a random +/-90 degree rotation.
    """
    size = EfficientNet.get_image_size(args.model_name)
    # TODO: there is a helper somewhere that adds proven fastai defaults.
    train_tfms = [rotate(degrees=(-90, 90), p=1)]

    items = SSIC.get_train_imagelist(args.validate_ratio)
    items = items.transform(tfms=(train_tfms, []),
                            size=size,
                            resize_method=ResizeMethod.SQUISH)
    # TODO: data.show_batch(3, figsize=(9, 9))
    return items.databunch(bs=args.batch_size).normalize(imagenet_stats)
コード例 #7
0
def main_worker():
    """Train an EfficientNet classifier end-to-end.

    All hyper-parameters come from the module-level ``opt`` namespace;
    training progress is logged to TensorBoard via SummaryWriter.
    """
    writer = SummaryWriter()
    # Build the network from scratch (no pretrained weights) with the
    # requested number of output classes.
    model = EfficientNet.from_name(opt.arch,
                                   override_params={
                                       'num_classes': opt.num_class
                                   }).cuda()
    # Loss: plain cross-entropy or a class-weighted focal loss.
    if opt.loss_kind == 'CE':
        criterion = nn.CrossEntropyLoss().cuda()
    elif opt.loss_kind == 'Focal':
        criterion = Focal(alpha=[2.5, 2.5, 2.5, 2.5, 2.5, 2.5],
                          num_class=opt.num_class).cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                opt.lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)
    cudnn.benchmark = True
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Crop to the variant's native input resolution.
    opt.image_size = EfficientNet.get_image_size(opt.arch)
    # Training augmentation: oversized resize -> random rotation ->
    # center crop -> normalize -> Cutout regularization.
    train_dataset = MyDataset(opt.train_data,
                              transform=transforms.Compose([
                                  transforms.Resize(opt.image_size + 100),
                                  transforms.RandomRotation(360),
                                  transforms.CenterCrop(opt.image_size),
                                  transforms.ToTensor(), normalize,
                                  Cutout(opt.cutout_n, opt.cutout_len)
                              ]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.worker,
                                               pin_memory=True)
    # Validation data (note: it is also randomly augmented here).
    test_dataset = MyDataset(opt.test_data,
                             transform=transforms.Compose([
                                 transforms.Resize(opt.image_size + 100),
                                 transforms.RandomRotation(360),
                                 transforms.RandomCrop(opt.image_size),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.RandomVerticalFlip(),
                                 transforms.ToTensor(), normalize
                             ]))
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=opt.test_batch_size,
                                              shuffle=False,
                                              num_workers=opt.worker,
                                              pin_memory=True)
    for i in range(0, opt.epochs):
        adjust_learning_rate(optimizer, i, opt)
        train(model, train_loader, test_loader, criterion, optimizer, writer)
        # Per-epoch validation is currently disabled:
        #val_acc,val_loss = validate(model,test_loader,criterion)
        #print("{} epoch,acc:{},loss{}".format(i+1,val_acc,val_loss))
    writer.close()
コード例 #8
0
 def __init__(self, model_name="efficientnet-b0"):
     """Load a pretrained EfficientNet in eval mode for feature extraction.

     Args:
         model_name: EfficientNet variant name (default "efficientnet-b0").
     """
     self.model_name = model_name
     self.TARGET_IMG_SIZE = EfficientNet.get_image_size(self.model_name)
     # Square-resize straight to the target size (no center crop).
     shape = (self.TARGET_IMG_SIZE, self.TARGET_IMG_SIZE)
     self.transform = transforms.Compose([
         transforms.Resize(shape),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
     ])
     # Prefer the GPU when one is available.
     self.device = torch.device("cuda") if torch.cuda.is_available() \
         else torch.device("cpu")
     net = EfficientNet.from_pretrained(self.model_name)
     net.to(self.device)
     net.eval()
     self.model = net
コード例 #9
0
def get_dataset(tfms_list=None, model_name="b0"):
    """Split Caltech-101 into train/val/test subsets (60/20/20).

    Args:
        tfms_list: extra torchvision transforms appended after the
            resize/center-crop pair; defaults to no extra transforms.
        model_name: EfficientNet suffix ("b0".."b7") choosing the resolution.

    Returns:
        (train_data, val_data, test_data) random splits.
    """
    # Bug fix: the old signature used `tfms_list=list()`, a single mutable
    # default shared across every call.
    if tfms_list is None:
        tfms_list = []
    resolution = EfficientNet.get_image_size("efficientnet-" + model_name)
    tfms = transforms.Compose(
        [transforms.Resize(resolution),
         transforms.CenterCrop(resolution)] + tfms_list)
    cal_tech101 = ImageFolder(
        root="/home/loc/projects/efficient_net/data/101_ObjectCategories",
        transform=tfms)
    # Train 60%, val 20%, test 20%
    train_size = int(0.6 * len(cal_tech101))
    test_size = int(0.2 * len(cal_tech101))
    val_size = len(cal_tech101) - train_size - test_size
    train_data, val_data, test_data = random_split(
        cal_tech101, [train_size, val_size, test_size])
    return train_data, val_data, test_data
コード例 #10
0
ファイル: train.py プロジェクト: mindw96/cough_detection
def efficientnet_load(model_name):
    """Build an EfficientNet with ImageNet weights, re-headed for 50 classes.

    Args:
        model_name: EfficientNet variant name, e.g. "efficientnet-b0".

    Returns:
        The pretrained model with custom BN/dropout hyper-parameters.
    """
    model = EfficientNet.from_pretrained(model_name,
                                         in_channels=3,
                                         num_classes=50,
                                         batch_norm_momentum=0.99,
                                         batch_norm_epsilon=1e-3,
                                         dropout_rate=0.4,
                                         drop_connect_rate=0.3,
                                         depth_divisor=8,
                                         include_top=True)
    # Report success only after the weights have actually been loaded (the
    # original printed before loading; a no-op `model_name = model_name`
    # self-assignment and an unused `image_size` local were also removed).
    print(model_name, 'load complete')

    return model
コード例 #11
0
ファイル: our_data_loader.py プロジェクト: VITA-Group/MAD
def my_effnet_loader(batch_size):
    """Return a test DataLoader using EfficientNet-B7 eval preprocessing."""
    import PIL
    from efficientnet_pytorch import EfficientNet

    side = EfficientNet.get_image_size('efficientnet-b7')
    print('image_size:', side)
    # Standard ImageNet eval pipeline at B7's native resolution.
    preprocessing = transforms.Compose([
        transforms.Resize(side, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(side),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    test_set = Dataset(transform=preprocessing)
    return torch.utils.data.DataLoader(dataset=test_set,
                                       batch_size=batch_size,
                                       shuffle=False,
                                       num_workers=16,
                                       pin_memory=True)
コード例 #12
0
ファイル: archs.py プロジェクト: ideafisher/ssUnet-GAN
    def __init__(self, model_info):
        """Build the convolutional feature extractor for AttentiveCNN.

        Args:
            model_info: dict with keys 'eff_flag' (use EfficientNet instead
                of ResNet-101), 'phase_train' (load pretrained weights) and,
                when 'eff_flag' is true, 'eff_model_name'.
        """
        super(AttentiveCNN, self).__init__()

        self.f_channel = 1408
        eff_net_flag = model_info['eff_flag']
        is_train = model_info['phase_train']
        if eff_net_flag == True:
            model_name = model_info['eff_model_name']
            pretrained_base = '../pretrained/normal/'
            image_size = EfficientNet.get_image_size(model_name)

            print('==> Building model.. : ', model_name)
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            if is_train == True:
                # NOTE(review): pretrained_base is passed as the second
                # positional argument to from_pretrained — confirm this
                # matches the installed efficientnet_pytorch signature.
                model = EfficientNet.from_pretrained(model_name,
                                                     pretrained_base)
            else:
                model = EfficientNet.from_name(model_name)

            # Final feature-map channel count per variant; unknown variants
            # keep the 1408 default, exactly as the old if-chain did.
            self.f_channel = {
                'efficientnet-b2': 1408,
                'efficientnet-b3': 1536,
                'efficientnet-b4': 1792,
                'efficientnet-b5': 2048,
            }.get(model_name, 1408)

            self.eff_conv = model.to(device)
            self.input_img_size = image_size
            self.eff_channel = 1024
            # 1x1 conv projecting backbone features down to eff_channel.
            self.conv_a = nn.Conv2d(self.f_channel,
                                    self.eff_channel,
                                    kernel_size=1,
                                    bias=False)
        else:
            # ResNet-101 backend: drop the last fc layer and avg pool so the
            # module outputs the final conv feature map.
            resnet = models.resnet101(pretrained=True)
            modules = list(resnet.children())[:-2]
            self.resnet_conv = nn.Sequential(*modules)
            self.input_img_size = 224

        self.eff_net_flag = eff_net_flag
コード例 #13
0
class EfficientClassifier:
    """Top-5 ImageNet classifier backed by a pretrained EfficientNet-B7."""

    MODEL_NAME = "efficientnet-b7"
    # Native input resolution for the chosen variant (the previous "# 224"
    # comment was wrong for b7; 224 is b0's resolution).
    IMAGE_SIZE = EfficientNet.get_image_size(MODEL_NAME)
    model = None
    labels_map = None

    def __init__(self):
        self._initialize_model()

    def _initialize_model(self):
        """Load the ImageNet label map and the pretrained network."""
        path = str(Path(os.path.abspath(__file__)).parents[1])
        # Close the label file deterministically (was json.load(open(...))).
        with open(path + "/labels_map.txt") as f:
            labels_map = json.load(f)
        self.labels_map = [labels_map[str(i)] for i in range(1000)]
        self.model = EfficientNet.from_pretrained(self.MODEL_NAME)
        self.model.eval()

    def _preprocess_image(self, image):
        """Resize/crop/normalize a PIL image into a (1, 3, H, W) tensor."""
        tfms = transforms.Compose(
            [
                transforms.Resize(self.IMAGE_SIZE),
                transforms.CenterCrop(self.IMAGE_SIZE),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        image = tfms(image).unsqueeze(0)
        return image

    def predict(self, img_path) -> dict:
        """Return the top-5 labels with probabilities for the given image.

        Args:
            img_path: path to an image file readable by PIL.

        Returns:
            {"predicts": [{"label": ..., "prob": "NN.NN"}, ...]}
            (the old `-> json` annotation named the json module, not a type).
        """
        image = Image.open(img_path)
        preprocessed_image = self._preprocess_image(image)

        with torch.no_grad():
            logits = self.model(preprocessed_image)
        preds = torch.topk(logits, k=5).indices.squeeze(0).tolist()

        predicts_json = {"predicts": []}
        print("-----")
        for idx in preds:
            label = self.labels_map[idx]
            prob = torch.softmax(logits, dim=1)[0, idx].item()
            print("{:<75} ({:.2f}%)".format(label, prob * 100))
            predicts_json["predicts"].append(
                {"label": label, "prob": format(prob * 100, ".2f")}
            )
        return predicts_json
コード例 #14
0
def tell(img):
    """Classify a PIL image with a checkpointed EfficientNet and print top-5.

    Relies on module-level globals: model_name, num_classes, use_gpu,
    resume_file and labels_map.
    """

    # Build the network (the unused criterion/optimizer the original
    # constructed for this inference-only path were removed).
    model = EfficientNet.from_pretrained(model_name, num_classes=num_classes)
    if use_gpu:
        model.cuda()

    # Restore the trained weights from the checkpoint.
    print("=> loading checkpoint '{}'".format(resume_file))
    checkpoint = torch.load(resume_file)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})".format(
        resume_file, checkpoint['epoch']))

    cudnn.benchmark = True

    # Standard ImageNet eval preprocessing at the variant's native size.
    image_size = EfficientNet.get_image_size(model_name)
    tfms = transforms.Compose([
        transforms.Resize(image_size, interpolation=Image.BICUBIC),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    img = tfms(img).unsqueeze(0)

    # Bug fix: inference must run in eval mode. The original called
    # model.train(), which keeps BatchNorm/Dropout in training behaviour
    # and skews the predictions.
    model.eval()
    with torch.no_grad():
        logits = model(img.cuda())
    preds = torch.topk(logits, k=5)[1].squeeze(0).tolist()
    print('-----')
    for idx in preds:
        label = labels_map[idx]
        prob = torch.softmax(logits, dim=1)[0, idx].item()
        print('{:<75} ({:.2f}%)'.format(label, prob * 100))
コード例 #15
0
    def __init__(self,
                 model_path: str = "efficientnet-b0",
                 init_model=None,
                 name: str = None,
                 description: str = None,
                 tasks: List[str] = None,
                 details: Dict = None,
                 device=None):
        """Set up an EfficientNet image-classification model.

        Args:
            model_path: EfficientNet variant name (e.g. "efficientnet-b0").
            init_model: optional factory used to build the network; defaults
                to EfficientNet_pt.from_pretrained with 1000 output classes.
            name: human-readable model name forwarded to PathModel.
            description: model description forwarded to PathModel.
            tasks: supported task names; defaults to ["image-classification"].
            details: extra metadata forwarded to PathModel.
            device: compute device forwarded to PathModel.
        """
        # Native input resolution for the chosen variant.
        self.image_size = EfficientNet_pt.get_image_size(model_path)
        # ImageNet's 1000 categories.
        self.num_classes = 1000

        if init_model is None:
            init_model = partial(EfficientNet_pt.from_pretrained,
                                 num_classes=self.num_classes)

        # Fetch (and cache) the ImageNet label map; JSON keys arrive as
        # strings, so re-key the mapping by integer class index.
        with open(download(IMAGENET_LABELS_URL, "efficientnet"), "r") as f:
            self.labels = json.load(f)
            self.labels = {int(k): v for k, v in self.labels.items()}

        # Standard ImageNet eval preprocessing; the convert("RGB") step
        # guards against grayscale/RGBA inputs.
        self.tfms = transforms.Compose([
            transforms.Resize(self.image_size, interpolation=Image.BICUBIC),
            transforms.CenterCrop(self.image_size),
            lambda image: image.convert("RGB"),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

        tasks = tasks or ["image-classification"]

        # Batch-size hint exposed to consumers of this model.
        self.optimal_batch_size = 128
        self.process_image = self.tfms

        # PathModel performs the actual model construction via init_model.
        PathModel.__init__(self,
                           model_path,
                           name=name,
                           description=description,
                           details=details,
                           tasks=tasks,
                           init_model=init_model,
                           device=device)
コード例 #16
0
def build_transparent(
    model_name: str,
    pretrained: bool = False,
    extract_blocks: Iterable[int] = (5,),
    num_classes: Optional[int] = None,
    freeze: bool = False,
):
    """Build an encoder model, possibly pretrained.

    Args:
        model_name: a torchvision resnet name or an "efficientnet-*" name.
        pretrained: load ImageNet weights when True.
        extract_blocks: indices of intermediate blocks whose features the
            transparent wrapper exposes. (Bug fix: the previous ``[5]``
            list default was a mutable default shared across calls; the
            tuple default is equivalent and immutable.)
        num_classes: classification-head size; None keeps the torchvision
            default head for resnets and 1000 classes for EfficientNet.
        freeze: freeze backbone weights in the wrapper.

    Returns:
        (wrapped model, expected input size).

    Raises:
        NotImplementedError: for unknown model names.
    """
    if model_name.startswith("resnet"):
        model_ft = models.__dict__[model_name](pretrained=pretrained)
        if num_classes is not None:
            num_ftrs = model_ft.fc.in_features
            model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
        return (
            TransparentResNet(model_ft,
                              extract_blocks=extract_blocks,
                              freeze=freeze),
            input_size,
        )
    elif model_name.startswith("efficientnet"):
        if num_classes is None:
            num_classes = 1000  # Matching EfficientNet default.
        if pretrained:
            model_ft = EfficientNet.from_pretrained(model_name,
                                                    num_classes=num_classes)
        else:
            model_ft = EfficientNet.from_name(
                model_name, override_params={"num_classes": num_classes})
        input_size = EfficientNet.get_image_size(model_name)
        return (
            TransparentEfficientNet(
                model_ft,
                input_size,
                extract_blocks=extract_blocks,
                freeze=freeze,
            ),
            input_size,
        )
    else:
        raise NotImplementedError("Unknown model name {}".format(model_name))
コード例 #17
0
    def __init__(self):
        """Image + metadata fusion classifier.

        EfficientNet-B1 features (1280-d) and 18 metadata features are each
        projected to 128-d, concatenated (256-d), and classified into 2
        classes by the fusion head.
        """
        super(MyNetwork, self).__init__()
        cnn_model_name = 'efficientnet-b1'
        self.cnn_model = EfficientNet.from_pretrained(cnn_model_name,
                                                      num_classes=2).to(device)
        print(EfficientNet.get_image_size(cnn_model_name))

        def dense(in_features, out_features):
            # Shared Linear -> BatchNorm -> ReLU -> Dropout(0.3) block.
            return [
                nn.Linear(in_features, out_features),
                nn.BatchNorm1d(out_features),
                nn.ReLU(),
                nn.Dropout(p=0.3),
            ]

        # Project 1280-d CNN features down to 128-d.
        self.cnn = nn.Sequential(*dense(1280, 128))

        # Three-layer MLP over the 18 metadata features.
        self.meta = nn.Sequential(
            *(dense(18, 128) + dense(128, 128) + dense(128, 128)))

        # Fusion head over the concatenated 256-d vector.
        self.post = nn.Sequential(
            *(dense(256, 128) + dense(128, 32)),
            nn.Linear(32, 2),
        )
コード例 #18
0
def initialize_model(
    model_name: str, num_classes: int, use_pretrained: bool = True
) -> Tuple[nn.Module, int]:
    """Create a feature-extracting classifier backbone.

    Args:
        model_name: "resnet*" (torchvision) or "efficientnet-*".
        num_classes: size of the classification head.
        use_pretrained: load ImageNet weights when True.

    Returns:
        (wrapped model, expected input size).

    Raises:
        ValueError: for unknown model names.
    """
    if model_name.startswith("resnet"):
        """Resnet18"""
        backbone = models.__dict__[model_name](pretrained=use_pretrained)
        backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
        # Wrap so that the model also yields the feature output.
        return FeatureExtractorResnet(backbone), 224

    if model_name.startswith("efficientnet"):
        """ EfficientNet"""
        from efficientnet_pytorch import EfficientNet
        from efficientnet_pytorch.utils import (
            get_same_padding_conv2d,
            round_filters,
        )

        if num_classes is None:
            num_classes = 1000  # Matching EfficientNet default.
        if use_pretrained:
            backbone = EfficientNet.from_pretrained(
                model_name, num_classes=num_classes
            )
        else:
            backbone = EfficientNet.from_name(
                model_name, override_params={"num_classes": num_classes}
            )
        size = EfficientNet.get_image_size(model_name)
        return FeatureExtractorEfficientnet(backbone), size

    raise ValueError("Invalid model name")
コード例 #19
0
def transform_image(image_bytes):
    """Decode raw image bytes, display them, and return a normalized
    (1, 3, 224, 224) input tensor."""
    pipeline = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    # Force RGB so grayscale/RGBA inputs decode to three channels.
    image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
    plt.imshow(image)
    plt.show()
    return pipeline(image).unsqueeze(0)


# Load a binary (2-class) EfficientNet-B7 classifier from a checkpoint and
# run it on a single test image.
model_name = 'efficientnet-b7'

image_size = EfficientNet.get_image_size(model_name)
model = EfficientNet.from_pretrained(model_name, num_classes=2)

model.load_state_dict(torch.load('class2.pt'))
model.eval()

with open("test.png", 'rb') as f:
    image_bytes = f.read()
    inputs = transform_image(image_bytes=image_bytes)

# Bug fix: run the forward pass under no_grad — the original built the
# autograd graph for an inference-only call, wasting memory.
with torch.no_grad():
    outputs = model(inputs)
_, predict = torch.max(outputs, 1)

print(predict)
コード例 #20
0
ファイル: train.py プロジェクト: zfg88287508/CarTeller
def work():
    """Train an EfficientNet on a cars dataset (CarsDataset).

    Configuration comes from the module-level ``opt_train`` namespace and
    the parsed ``args``; checkpoints are written via ``save_checkpoint``.
    """

    print(opt_train.use_gpu)
    print(opt_train.num_classes)
    # Build the network model.
    model = EfficientNet.from_pretrained(opt_train.model_name,
                                         num_classes=opt_train.num_classes)
    if opt_train.use_gpu:
        model.cuda()

    # Loss function and optimizer.
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                opt_train.lr,
                                momentum=opt_train.momentum,
                                weight_decay=opt_train.weight_decay)

    # Optionally resume training from an existing checkpoint.
    if args.resume == 'T':
        print("=> loading checkpoint '{}'".format(opt_train.resume_file))
        checkpoint = torch.load(opt_train.resume_file)
        opt_train.start_epoch = checkpoint['epoch']
        opt_train.best_acc1 = checkpoint['best_acc1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            opt_train.resume_file, checkpoint['epoch']))

    cudnn.benchmark = True

    # Load the training data.
    #    Training set.
    train_dataset = CarsDataset('train', opt_train.data_dir, None)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt_train.batch_size,
        shuffle=True,
        num_workers=opt_train.num_wokers,
        pin_memory=True)

    #    Validation set, resized to the model's native resolution.
    image_size = EfficientNet.get_image_size(opt_train.model_name)
    val_dataset = CarsDataset('val', opt_train.data_dir, image_size)
    val_loader = DataLoader(val_dataset,
                            batch_size=opt_train.batch_size,
                            shuffle=False,
                            num_workers=opt_train.num_wokers,
                            pin_memory=True)

    for epoch in range(opt_train.start_epoch, opt_train.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > opt_train.best_acc1
        opt_train.best_acc1 = max(acc1, opt_train.best_acc1)
        print("save:", epoch)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': opt_train.best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
コード例 #21
0
# Extra CLI flags (the parser itself is created earlier in the file).
parser.add_argument('--use32', action='store_true', help='train with 32*32 graph size without resizing')
parser.add_argument('--para', action='store_true', help='use multi GPU')
args = parser.parse_args()

# Checkpoint / training-curve filenames derived from the network name.
model_file = args.network + "_cifar10.ckpt"
fig_file = args.network + "_cifar10.png"
# NET is a list of supported network names (defined elsewhere in the file);
# the index selects the preprocessing branch below.
network = NET.index(args.network)
if network <= 1 or args.use32:
    # Small nets (or --use32): keep CIFAR-10's native 32x32 resolution.
    transform = transforms.ToTensor()
elif network <= 7:
    # ImageNet-style nets: resize up to 224x224.
    transform = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor()
    ])
elif network <= 11:
    # EfficientNet entries: index 8..11 maps to variant b0..b3, resized to
    # the variant's native input size.
    size = EfficientNet.get_image_size('efficientnet-b{}'.format(network-8))
    transform = transforms.Compose([
        transforms.Resize((size,size)),
        transforms.ToTensor()
    ])
else:
    sys.exit(1)

# prepare data
train_Data = dsets.CIFAR10(
    root='../data_cifar10',
    train=True,
    transform=transform,
    download=False
)
コード例 #22
0
from efficientnet_pytorch import EfficientNet

from sotabencheval.image_classification import ImageNetEvaluator
from sotabencheval.utils import is_server

# Resolve the ImageNet root: a fixed path on the sotabench server,
# otherwise the IMAGENET_DIR environment variable for local runs.
if is_server():
    DATA_ROOT = './.data/vision/imagenet'
else:  # local settings
    DATA_ROOT = os.environ['IMAGENET_DIR']
    assert bool(DATA_ROOT), 'please set IMAGENET_DIR environment variable'
    print('Local data root: ', DATA_ROOT)

# Display name kept mixed-case for reporting; the library expects lowercase.
model_name = 'EfficientNet-B5'
model = EfficientNet.from_pretrained(model_name.lower())
image_size = EfficientNet.get_image_size(model_name.lower())

# Standard ImageNet eval preprocessing at the model's native resolution.
input_transform = transforms.Compose([
    transforms.Resize(image_size, PIL.Image.BICUBIC),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])

# Validation split of ImageNet used for the evaluation run.
test_dataset = ImageNet(
    DATA_ROOT,
    split="val",
    transform=input_transform,
    target_transform=None,
)
コード例 #23
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point for (optionally distributed) fine-tuning.

    Builds either an EfficientNet or a torchvision model, swaps the final
    classifier for a 3-class head when fine-tuning, wraps the model for
    distributed / DataParallel execution, then runs the train/validate loop
    and checkpoints the best top-1 accuracy.

    Args:
        gpu: GPU index assigned to this process (None -> DataParallel path).
        ngpus_per_node: number of GPUs on this node (used for rank math and
            per-process batch-size division).
        args: parsed CLI namespace (arch, lr, resume, distributed flags, ...).
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if 'efficientnet' in args.arch:  # NEW
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch,
                                                 advprop=args.advprop)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)

    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            if args.arch.find('alexnet') != -1:
                model = models.__dict__[args.arch](pretrained=True)
            elif args.arch.find('inception_v3') != -1:
                model = models.inception_v3(pretrained=True)
            elif args.arch.find('densenet121') != -1:
                model = models.densenet121(pretrained=True)
            elif args.arch.find('resnet') != -1:  # ResNet
                model = models.__dict__[args.arch](pretrained=True)
            else:
                print(
                    '### please check the args.arch for load model in training###'
                )
                exit(-1)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

    if args.fine_tuning:
        print(
            "=> transfer-learning mode + fine-tuning (train only the last FC layer)"
        )
        # Freeze Previous Layers(now we are using them as features extractor)
        #jiangjiewei
        # for param in model.parameters():
        #    param.requires_grad = False

        # Fine Tuning the last Layer For the new task
        # juge network: alexnet, inception_v3, densennet, resnet50
        # Replace the classifier head with a fresh 3-class linear layer.
        if args.arch.find('alexnet') != -1:
            num_ftrs = model.classifier[6].in_features
            model.classifier[6] = nn.Linear(num_ftrs, 3)
        elif args.arch.find('inception_v3') != -1:
            num_ftrs = model.fc.in_features
            num_auxftrs = model.AuxLogits.fc.in_features
            model.fc = nn.Linear(num_ftrs, 3)
            model.AuxLogits.fc = nn.Linear(num_auxftrs, 3)
            # Disable the auxiliary head so forward() returns a single output.
            model.aux_logits = False
        elif args.arch.find('densenet121') != -1:
            num_ftrs = model.classifier.in_features
            model.classifier = nn.Linear(num_ftrs, 3)
        elif args.arch.find('resnet') != -1:  # ResNet
            num_ftrs = model.fc.in_features
            model.fc = nn.Linear(num_ftrs, 3)
        else:
            print(
                "###Error: Fine-tuning is not supported on this architecture.###"
            )
            exit(-1)

        print(model)
    else:
        # NOTE(review): `parameters` is assigned here but never used below;
        # the optimizer is built from base_params/fine_tune_parameters instead.
        parameters = model.parameters()
    # name, parma_1 = model.classifier[6].parameters()

    # for name, param in model.named_parameters():
    #     if param.requires_grad:
    #         print(name)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        # NOTE(review): 'alexnet-1'/'vgg-1' never match the usual arch names
        # ('alexnet', 'vgg*'), so this branch looks effectively dead — confirm
        # whether the '-1' suffix was intended to disable it.
        if args.arch.startswith('alexnet-1') or args.arch.startswith('vgg-1'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    # criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    #jiangjiewei add weight for crossentropyloss
    # NOTE(review): weights [1.5, 1.0, 3.0] presumably counter class imbalance
    # across the 3 classes — verify against the dataset distribution.
    criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1.5, 1.0, 3.0])).cuda(
        args.gpu)
    # use_cuda = True
    # device = torch.device("cuda" if use_cuda else "cpu")
    # class_weights = torch.FloatTensor([1.0, 0.2, 1.0]).cuda()
    # criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)

    # Collect the freshly-initialized head parameters so they can get a
    # larger learning rate than the pretrained backbone.
    # NOTE(review): the alexnet branch reads model.classifier directly while
    # the others go through model.module — this only works if alexnet was NOT
    # wrapped in DataParallel above; confirm the intended wrapping path.
    if args.arch.find('alexnet') != -1:
        fine_tune_parameters = model.classifier[6].parameters()
    elif args.arch.find('inception_v3') != -1:
        fine_tune_parameters = model.module.fc.parameters()
    elif args.arch.find('densenet121') != -1:
        fine_tune_parameters = model.module.classifier.parameters()
    elif args.arch.find('resnet') != -1:  # ResNet
        fine_tune_parameters = model.module.fc.parameters()
    else:
        print('### please check the ignored params ###')
        exit(-1)

    ignored_params = list(map(id, fine_tune_parameters))

    # Everything except the new head trains at the base learning rate.
    if args.arch.find('alexnet') != -1:
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.parameters())
    else:
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.module.parameters())

    # Two parameter groups: backbone at args.lr, new head at 10x args.lr.
    optimizer = torch.optim.SGD(
        [
            {
                'params': base_params
            },  #model.parameters()
            {
                'params': fine_tune_parameters,
                'lr': 10 * args.lr
            }
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train1')
    valdir = os.path.join(args.data, 'val1')
    if args.advprop:
        # AdvProp checkpoints expect inputs scaled to [-1, 1].
        normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)
    else:
        # NOTE(review): dataset-specific mean/std, not the ImageNet defaults —
        # presumably computed on the fundus dataset; confirm.
        normalize = transforms.Normalize(
            mean=[0.5765036, 0.34929818, 0.2401832],
            std=[0.2179051, 0.19200659, 0.17808074])

    if 'efficientnet' in args.arch:
        image_size = EfficientNet.get_image_size(args.arch)
    else:
        image_size = args.image_size

    # NOTE(review): training resize uses args.image_size, not the
    # EfficientNet-derived image_size computed above — confirm intentional.
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            # transforms.Resize((256, 256), interpolation=PIL.Image.BICUBIC),
            # transforms.Resize((224, 224)),
            transforms.Resize((args.image_size, args.image_size),
                              interpolation=PIL.Image.BICUBIC),
            # transforms.RandomResizedCrop((image_size, image_size) ),  #RandomRotation scale=(0.9, 1.0)
            transforms.RandomRotation(90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    print('classes:', train_dataset.classes)
    # Get number of labels
    labels_length = len(train_dataset.classes)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_transforms = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size),
                          interpolation=PIL.Image.BICUBIC),
        # transforms.CenterCrop((image_size,image_size)),
        transforms.ToTensor(),
        normalize,
    ])
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir, val_transforms),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        res = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(res, file=f)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Checkpoint filename prefix follows the architecture family.
        if args.arch.find('alexnet') != -1:
            pre_name = './alexnet'
        elif args.arch.find('inception_v3') != -1:
            pre_name = './inception_v3'
        elif args.arch.find('densenet121') != -1:
            pre_name = './densenet121'
        elif args.arch.find('resnet50') != -1:
            pre_name = './resnet50'
        else:
            print('### please check the args.arch for pre_name###')
            exit(-1)

        # Only the rank-0 process (or a non-distributed run) writes checkpoints.
        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, pre_name)

    # PATH = pre_name + '_fundus_net.pth'
    # torch.save(model.state_dict(), PATH)
    print('Finished Training')
コード例 #24
0
# CLI options for the validation run (model/train dirs come from earlier args).
parser.add_argument('-v', '--valid', required=True)
parser.add_argument('-b', '--batchsize', required=False, default=8)
parser.add_argument('-mn', '--modelname', required=False, default="b5")

args = parser.parse_args()

MODEL_PATH = args.modeldir
TRAIN_PATH = args.train
VALID_PATH = args.valid
BATCH_SIZE = int(args.batchsize)
MODEL_NAME = "efficientnet-" + args.modelname

# Label images by their folder names, holding out a random 20% split.
item_list = ImageList.from_folder(path=TRAIN_PATH)
src = item_list.split_by_rand_pct(0.2).label_from_folder()

# Train-time augmentation only (random rotation); validation gets none.
# NOTE(review): this rebinds the module-level name `transforms`.
transforms = ([rotate(degrees=(-90, 90), p=0.8)], [])
image_size = EfficientNet.get_image_size(MODEL_NAME)

labelled = src.transform(transforms,
                         size=image_size,
                         resize_method=ResizeMethod.SQUISH)
data = labelled.databunch(bs=BATCH_SIZE).normalize(imagenet_stats)

data.show_batch(3, figsize=(9, 9))

# Load model weights, mapping to CPU when no CUDA device is available.
load_kwargs = {} if torch.cuda.is_available() else {
    'map_location': torch.device('cpu')
}
state_dict = torch.load(MODEL_PATH, **load_kwargs)

model = EfficientNet.from_pretrained(MODEL_NAME)
コード例 #25
0
# ======================= torch to onnx (Gender model) =======================

import sys
from collections import OrderedDict

import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet

# Checkpoint path comes from the command line; the ONNX file reuses its stem.
model_file = sys.argv[1]
model_name = model_file.partition('.')[0]
output_name = "{}.onnx".format(model_name)

# Fixed backbone and its native input resolution.
backbone = 'efficientnet-b7'
image_size = EfficientNet.get_image_size(backbone)
print('Image size: ', image_size)

# Two-class (gender) head on the pretrained backbone.
model = EfficientNet.from_pretrained(backbone, num_classes=2)

checkpoint = torch.load(model_file)
# (Left over from experiments: remapping 'module.'-prefixed keys and swapping
# the final fully-connected layer for other class counts.)
# new_state_dict = OrderedDict()
# for k, v in checkpoint['model'].items():
#     name = k.replace("module.", "")
#     new_state_dict[name]= v

# num_features = model._fc.in_features
# print(num_features)
# model._fc = nn.Linear(num_features, 4) #for emotion
# model._fc = nn.Linear(num_features, 7)
コード例 #26
0
def run():
    """End-to-end inference pipeline for the snake-species challenge.

    1. Crops each test image to the snake bounding box predicted by a
       ResNet-50 detector (fastai Learner).
    2. Classifies the crops with an ensemble of two fine-tuned
       EfficientNet-B5 models using test-time augmentation, averaging
       the softmax probabilities.
    3. Writes per-class probabilities into the provided sample submission
       file and registers completion with the AIcrowd helpers.
    """
    ########################################################################
    # Register Prediction Start
    ########################################################################
    aicrowd_helpers.execution_start()

    ########################################################################
    # Gather Input and Output paths from environment variables
    ########################################################################
    test_images_path, predictions_output_path = gather_input_output_path()

    ########################################################################
    # Gather Image Names
    ########################################################################
    image_names = gather_image_names(test_images_path)

    ########################################################################
    # Do your magic here to train the model
    ########################################################################
    # Preprocess first here

    src = (ImageList.from_folder(
        path='fastai-data').split_by_rand_pct(0.0).label_from_folder()
           )  # any folder which has sub-folders will work
    tfms = get_transforms(do_flip=True,
                          flip_vert=False,
                          max_rotate=10.0,
                          max_zoom=1.1,
                          max_lighting=0.2,
                          max_warp=0.2,
                          p_affine=0.75,
                          p_lighting=0.75)  # some tfms
    data = (src.transform(tfms, size=360,
                          resize_method=ResizeMethod.SQUISH).databunch(
                              bs=32, num_workers=0).normalize(imagenet_stats))

    # Bounding-box detector: ResNet-50 backbone with an L1 regression loss.
    learn = Learner(
        data, SnakeDetector(arch=models.resnet50), loss_func=L1Loss(
        ))  #temp data, we wont be using this, we will be using src_new
    learn.split([learn.model.cnn[:6], learn.model.cnn[6:], learn.model.head])
    state_dict = torch.load(
        'fastai-data/models/snake-detection-model.pth')  #our trained model
    learn.model.load_state_dict(state_dict['model'])
    if not os.path.exists('preprocessed-images'):
        os.makedirs('preprocessed-images')  #directory to store files

    src_new = (ImageList.from_folder(
        path=test_images_path).split_by_rand_pct(0.0).label_from_folder()
               )  # fetch given test images from data/images

    # Crop each test image to the predicted snake bounding box; the detector
    # outputs normalized coordinates which are scaled back to original size.
    for filename in src_new.items:
        try:
            im = cv2.imread(f"(unknown)", cv2.IMREAD_COLOR)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            im = cv2.resize(im, (360, 360), interpolation=cv2.INTER_AREA)
            im_height, im_width, _ = im.shape
            orig_im = cv2.imread(f"(unknown)", cv2.IMREAD_COLOR)
            orig_im_height, orig_im_width, _ = orig_im.shape
            to_pred = open_image(filename)
            _, _, bbox = learn.predict(to_pred)
            im_original = cv2.imread(f"(unknown)", cv2.IMREAD_COLOR)
            im_original = cv2.cvtColor(im_original, cv2.COLOR_BGR2RGB)
            im_original.shape
            im_original_width = im_original.shape[1]
            im_original_height = im_original.shape[0]
            bbox_new = bbox
            bbox_new[0] = bbox_new[0] * im_original_width
            bbox_new[2] = bbox_new[2] * im_original_width
            bbox_new[1] = bbox_new[1] * im_original_height
            bbox_new[3] = bbox_new[3] * im_original_height
            x_min, y_min, x_max, y_max = map(int, bbox_new)
            #cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
            im_original = im_original[
                y_min:y_max, x_min:
                x_max]  #cropping is just slicing opencv uses h,w . which is y,x here
            im_original = cv2.cvtColor(im_original, cv2.COLOR_BGR2RGB)
            filename_str = str(filename)
            to_save = filename_str.replace(
                test_images_path, 'preprocessed-images'
            )  # original file is in  /data/images/*.jpg -> store it at preprocessed-images/*.jpg and use preprocessed-images folder later
            cv2.imwrite(to_save, im_original)
        # NOTE(review): bare except silently skips any image the detector or
        # OpenCV fails on — consider catching specific exceptions and logging.
        except:
            pass
    del learn
    gc.collect()

    model_name = 'efficientnet-b5'
    image_size = EfficientNet.get_image_size(model_name)

    model = EfficientNet.from_pretrained(model_name)

    np.random.seed(13)

    src = (ImageList.from_folder(
        path='fastai-data').split_by_rand_pct(0.2).label_from_folder())

    object_detection_results_path = os.getcwd(
    ) + "/preprocessed-images"  #output of previous step is stored here.
    src.add_test_folder(object_detection_results_path)

    tfms = ([rotate(degrees=(-90, 90), p=0.8)], [])

    bs = 8
    data = (src.transform(tfms,
                          size=image_size,
                          resize_method=ResizeMethod.SQUISH).databunch(
                              bs=bs, num_workers=0).normalize(imagenet_stats))
    model.add_module('_fc', nn.Linear(
        2048,
        data.c))  #Replace the final layer of b5 model with number of classes
    loss_func = LabelSmoothingCrossEntropy()  #following EfficientNet paper
    RMSprop = partial(torch.optim.RMSprop)  #Following EfficientNet paper

    learn = Learner(data,
                    model,
                    loss_func=loss_func,
                    opt_func=RMSprop,
                    metrics=[accuracy,
                             FBeta(beta=1, average='macro')])

    learn.split(
        [[learn.model._conv_stem, learn.model._bn0, learn.model._blocks[:19]],
         [learn.model._blocks[19:], learn.model._conv_head],
         [learn.model._bn1, learn.model._fc]])  #for differential learning

    learn.load(
        'b5-seed-13-round-3'
    )  #best single model - 85.2 local cv. Try ensemble later. with 83.1 and 85.2 models

    ########################################################################
    # Generate Predictions
    ########################################################################

    preds, _ = learn.TTA(ds_type=DatasetType.Test)  # Test time augmentation

    # Convert log-probabilities to softmax probabilities.
    probs_seed_13 = np.exp(preds) / np.exp(preds).sum(1)[:, None]

    del learn
    gc.collect()  #garbage collect

    # Second ensemble member: same architecture, different training seed.
    model = EfficientNet.from_pretrained(model_name)

    model.add_module('_fc', nn.Linear(2048, data.c))

    learn = Learner(data,
                    model,
                    loss_func=loss_func,
                    opt_func=RMSprop,
                    metrics=[accuracy,
                             FBeta(beta=1, average='macro')])  #mew learner

    learn.split(
        [[learn.model._conv_stem, learn.model._bn0, learn.model._blocks[:19]],
         [learn.model._blocks[19:], learn.model._conv_head],
         [learn.model._bn1,
          learn.model._fc]])  #not needed, but not takin chances

    learn.load('b5-seed-15-round-7')  #83.6, 83.1 localcv model

    preds, _ = learn.TTA(ds_type=DatasetType.Test)

    probs_seed_15 = np.exp(preds) / np.exp(preds).sum(1)[:, None]

    # Simple average of the two models' probabilities.
    probs = (probs_seed_13 + probs_seed_15) / 2

    probs_np = probs.numpy()

    df_test = pd.read_csv(
        'given_submission_sample_file.csv', low_memory=False
    )  #use the given sample file to replace probabilites with our model predictions, This way, no need to worry about corrupted images

    df_classes = pd.read_csv('class.csv', low_memory=False)  #class mapping

    data_dict = df_classes.set_index(
        'class_idx')['original_class'].to_dict()  #for look up

    probs_np = probs.numpy()

    # Map each submission filename to its row index for fast lookup.
    df_testfile_mapping = pd.DataFrame(columns=['filename', 'map'])

    df_testfile_mapping['filename'] = df_test['filename']

    for i, row in df_testfile_mapping.iterrows():
        row['map'] = i

    data_dict_filename = df_testfile_mapping.set_index(
        'filename'
    )['map'].to_dict(
    )  # for lookup, returns the index where the filename is found in the given submission file

    # Write each predicted class probability into the submission row for
    # the corresponding test file (classes resolved via class.csv mapping).
    i = 0
    for test_file in data.test_ds.items:
        filename = (os.path.basename(test_file))
        map_val = int(data_dict_filename[filename])
        for c in range(0, 45):
            df_test.loc[
                map_val,
                data_dict[int(data.classes[c].split("-")[1])]] = probs_np[i][c]
        i += 1


#around 7 predictions causes assertion error, for now submit them as class-204

    # Rows whose probabilities don't sum to ~1 are zeroed out.
    # NOTE(review): the final assignment below runs for EVERY row, not only
    # the zeroed ones — confirm it was meant to sit inside the `if` block.
    for i, row in df_test.iterrows():
        sum_temp = row[1:46].sum()
        low_limit = 1 - 1e-6
        high_limit = 1 + 1e-6

        if not (sum_temp >= low_limit and sum_temp <= high_limit):
            for c in range(1, 46):
                df_test.iloc[i, c] = 0.
        df_test.loc[i, 'thamnophis_sirtalis'] = 1.

    df_test.to_csv('generated-submission-3.csv', index=False)

    copyfile('generated-submission-3.csv',
             predictions_output_path)  #save to output path

    ########################################################################
    # Register Prediction Complete
    ########################################################################
    aicrowd_helpers.execution_success(
        {"predictions_output_path": predictions_output_path})
コード例 #27
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point (29-class variant).

    Builds an EfficientNet (29-class head) or a torchvision model, wraps it
    for distributed / DataParallel execution, and runs the train/validate
    loop with checkpointing of the best top-1 accuracy.

    Args:
        gpu: GPU index assigned to this process (None -> DataParallel path).
        ngpus_per_node: number of GPUs on this node.
        args: parsed CLI namespace (arch, lr, resume, distributed flags, ...).
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if 'efficientnet' in args.arch:  # NEW  efficientnet-b4
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch, num_classes=29)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)

    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    # traindir = os.path.join(args.data, 'train')
    # valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # train_dataset = datasets.ImageFolder(
    # traindir,
    # transforms.Compose([
    # transforms.Resize(256),
    # transforms.RandomResizedCrop(224),
    # transforms.RandomHorizontalFlip(),
    # transforms.ToTensor(),
    # normalize,
    # ]))

    # NOTE(review): train_dataset is only constructed in the commented-out
    # block above — this DistributedSampler line will raise NameError when
    # args.distributed is set. Confirm the intended dataset source.
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    # train_loader = torch.utils.data.DataLoader(
    # train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    # num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    train_transform = transforms.Compose([
        transforms.Resize(256),  ## resize every input to 256
        transforms.RandomResizedCrop(
            224),  # randomly crop a (224, 224) patch, then resize to 224
        transforms.RandomHorizontalFlip(),  ## horizontal flip
        transforms.RandomVerticalFlip(p=0.5),  # vertical flip
        transforms.RandomRotation(30),  # rotate within (-30, 30) degrees
        transforms.ToTensor(),
        normalize  # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  #
    ])
    train_loader = torch.utils.data.DataLoader(
        dataset=imgdata("crop//dataset/yun/train.txt",
                        train_transform),  # return img and label
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers,
        pin_memory=True,
        sampler=train_sampler)

    if 'efficientnet' in args.arch:
        # EfficientNet variants each define their own native resolution.
        image_size = EfficientNet.get_image_size(args.arch)
        val_transforms = transforms.Compose([
            transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', image_size)
    else:
        val_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', 224)

    # NOTE(review): validation reads the same train.txt file list used for
    # training — presumably a placeholder; verify a held-out list exists.
    val_loader = torch.utils.data.DataLoader(dataset=test_imgdata(
        "crop//dataset/yun/train.txt", val_transforms),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # if args.evaluate:
    # res = validate(val_loader, model, criterion, args)
    # with open('res.txt', 'w') as f:
    # print(res, file=f)
    # print("validation")
    # return

    for epoch in range(args.start_epoch, args.epochs):
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print(args.start_epoch, args.epochs)
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 >= best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Only the rank-0 process (or a non-distributed run) writes checkpoints.
        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
コード例 #28
0
# Silence warning spam and make numpy print arrays in full, on one line.
warnings.filterwarnings('ignore')
np.set_printoptions(linewidth=np.inf, threshold=np.inf)

# Experiment configuration
networkname = "efficientnet-b1"
test_interval = 100
save_interval = 1000
batch_size = 64
max_epochs = 105
learning_rate = 3e-5
max_cells = 31
dir = "b1-3e-5-long"  # NOTE(review): shadows builtin dir(); kept for compatibility
datadir = "/data/128p_pc/"

# Native input resolution for the chosen EfficientNet variant.
img_size = EfficientNet.get_image_size(networkname)

# Log directory tree plus per-metric history lists (train / test).
os.makedirs("../logs", exist_ok=True)
os.makedirs("../logs/" + dir, exist_ok=True)
train_mae_log, test_mae_log = [], []
train_dev_log, test_dev_log = [], []
train_cor_log, test_cor_log = [], []


class CustomDatasetFromImages(Dataset):
    def __init__(self, csv_path):
        # Transforms
        self.to_tensor = transforms.Compose([
コード例 #29
0
    return result


def post_transforms():
    """Final transform stage shared by train and valid pipelines.

    Applies ImageNet normalization, then converts the image to a
    ``torch.Tensor``.
    """
    # we use ImageNet image normalization
    # and convert it to torch.Tensor
    normalize = A.Normalize(p=1.0)
    to_tensor = ToTensorV2(p=1.0)
    return [normalize, to_tensor]


if __name__ == "__main__":
    warnings.simplefilter("ignore", UserWarning)
    warnings.simplefilter("ignore", DeprecationWarning)
    warnings.filterwarnings('ignore')
    os.environ["PYTHONWARNINGS"] = "ignore"
    config = ConfigExperiment()
    config.size = EfficientNet.get_image_size(config.model_name)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    utils.set_global_seed(config.seed)
    utils.prepare_cudnn(deterministic=True)


    train_transforms = plant.compose([
        pre_transforms(config.size),
        hard_transforms(),
        post_transforms()
    ])
    valid_transforms = plant.compose([
        pre_transforms(config.size),
        post_transforms()
    ])
コード例 #30
0
def _setup_distributed(args, ngpus_per_node, gpu):
    """Initialize the distributed process group; no-op for single-process runs."""
    if not args.distributed:
        return
    if args.dist_url == "env://" and args.rank == -1:
        args.rank = int(os.environ["RANK"])
    if args.multiprocessing_distributed:
        # For multiprocessing distributed training, rank needs to be the
        # global rank among all the processes.
        args.rank = args.rank * ngpus_per_node + gpu
    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url,
                            world_size=args.world_size,
                            rank=args.rank)


def _build_model(args, ngpus_per_node):
    """Create the pretrained network and move it onto the right device(s).

    In the one-GPU-per-process distributed case this also divides
    ``args.batch_size`` and ``args.workers`` by the node's GPU count, so
    each process handles its share of the global batch.
    """
    if 'efficientnet' in args.arch:
        model = EfficientNet.from_pretrained(args.arch)
    else:
        model = models.__dict__[args.arch](pretrained=True)

    if args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Single GPU per process: split batch size and workers across
            # the GPUs of this node.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to
            # all available GPUs if device_ids are not set.
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs.
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    return model


def _build_loader(args):
    """Build the evaluation DataLoader (sequential, optionally distributed)."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if 'efficientnet' in args.arch:
        # EfficientNet variants each have a native input resolution.
        image_size = EfficientNet.get_image_size(args.arch)
        dataset = ImageFileList(
            args.data_root, args.data_list,
            transforms.Compose([
                transforms.Resize(image_size, interpolation=Image.BICUBIC),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                normalize,
            ]))
    else:
        # Standard ImageNet evaluation pipeline for torchvision models.
        dataset = ImageFileList(
            args.data_root, args.data_list,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ]))

    if args.distributed:
        sampler = DistributedSequentialSampler(dataset, args.world_size,
                                               args.rank)
    else:
        sampler = None

    return torch.utils.data.DataLoader(dataset,
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=args.workers,
                                       pin_memory=True,
                                       sampler=sampler)


def task(gpu, ngpus_per_node, args):
    """Worker entry point: run inference over the dataset and save results.

    Depending on ``args.action``, computes either pooled backbone features
    (``'extract'``) or softmax class scores for every image, concatenates
    them, and writes the array to ``args.output`` as a ``.npy`` file. In
    distributed mode the per-rank shards are gathered and only rank 0 saves.

    Args:
        gpu: GPU index assigned to this process (or None for CPU/DataParallel).
        ngpus_per_node: number of GPUs on this node.
        args: parsed command-line namespace (arch, data paths, distributed
            settings, action, output, ...). Mutated: ``args.gpu``, and in the
            per-process distributed case ``args.batch_size``/``args.workers``.
    """
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    _setup_distributed(args, ngpus_per_node, gpu)
    model = _build_model(args, ngpus_per_node)

    cudnn.benchmark = True

    loader = _build_loader(args)

    # Progress/timing bookkeeping.
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    progress = ProgressMeter(len(loader), batch_time, prefix='Test: ')

    model.eval()
    end = time.time()

    features = []
    # Inference only: enter no_grad() once around the whole loop instead of
    # re-entering the context for every batch.
    with torch.no_grad():
        for i, images in enumerate(loader):
            data_time.update(time.time() - end)

            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)

            if args.action == 'extract':
                # DistributedDataParallel wraps the real network in .module.
                backbone = model.module if args.distributed else model
                # Global-average-pool the backbone feature map to a vector.
                feat = F.adaptive_avg_pool2d(
                    backbone.extract_features(images),
                    1).squeeze(-1).squeeze(-1)
            else:
                feat = torch.softmax(model(images), dim=1)
            features.append(feat.cpu().numpy())
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.print(i)

    features = np.concatenate(features, axis=0)
    if args.distributed:
        # Collect all ranks' shards; only rank 0 writes the final array.
        all_features = gather_tensors_batch(features, 100)
        if args.rank == 0:
            np.save(args.output, all_features)
    else:
        np.save(args.output, features)