Example No. 1
def main():

    batch_size = 20

    # model = models.resnet18(pretrained=False)
    model = get_model("xception", pretrained=False)
    model = nn.Sequential(*list(
        model.children())[:-1])  # Remove original output layer

    model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))  # xception: replace final pooling
    model = FCN(model, 2048)
    model.cuda()
    train_data = Task1_loader("./Task_1/train.csv", phase='train')
    test_data = Task1_loader("./Task_1/test.csv", phase='test')

    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8)
    valid_loader = DataLoader(test_data,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=8)

    criterion = nn.BCELoss()
    # optimizer = optim.SGD(model.parameters(), lr=0.0018, momentum=0.27)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-5)

    train(model,
          train_loader,
          valid_loader,
          criterion,
          optimizer,
          100,
          device='cuda')
Example No. 2
    def __init__(self, pretrained=True, latent_layer_num=20):
        super().__init__()

        model = get_model("mobilenet_w1", pretrained=pretrained)
        model.features.final_pool = nn.AvgPool2d(4)

        all_layers = []
        remove_sequential(model, all_layers)
        all_layers = remove_DwsConvBlock(all_layers)

        lat_list = []
        end_list = []

        for i, layer in enumerate(all_layers[:-1]):
            if i <= latent_layer_num:
                lat_list.append(layer)
            else:
                # ABB: Change the structure!
                end_list.append(layer)

        self.lat_features = nn.Sequential(*lat_list)
        self.end_features = nn.Sequential(*end_list)

        self.output = nn.Linear(1024, 50, bias=False)
        self.rf = nn.Linear(1024, 1, bias=False)

        self.sig = nn.Sigmoid()
Example No. 3
def xception2(pretrained=False):
    model = get_model("xception", pretrained=pretrained)
    model = nn.Sequential(*list(
        model.children())[:-1])  # Remove original output layer
    model[0].final_block.pool = nn.Sequential(nn.Flatten())
    model = FCN2(model)
    return model
Example No. 4
def xception(pretrained=False):
    model = get_model("xception", pretrained=pretrained)
    model = nn.Sequential(*list(
        model.children())[:-1])  # Remove original output layer
    model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
    model = FCN(model, 2048)
    return model
Example No. 5
def create_model(predict_only: bool, dropout: float) -> Any:
    logger.info(f'creating a model {opt.MODEL.ARCH}')

    model = get_model(opt.MODEL.ARCH,
                      pretrained=not predict_only,
                      root=opt.PRETRAINED_PATH)

    if 'ception' not in opt.MODEL.ARCH:
        model.features[-1] = nn.AdaptiveAvgPool2d(1)

    if opt.MODEL.ARCH == 'pnasnet5large':
        if dropout < 0.1:
            model.output = nn.Linear(model.output[-1].in_features,
                                     opt.MODEL.NUM_CLASSES)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output[-1].in_features, opt.MODEL.NUM_CLASSES))
    elif 'ception' in opt.MODEL.ARCH:
        model.output[-1] = nn.Linear(model.output[-1].in_features,
                                     opt.MODEL.NUM_CLASSES)
    else:
        if dropout < 0.1:
            model.output = nn.Linear(model.output.in_features,
                                     opt.MODEL.NUM_CLASSES)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output.in_features, opt.MODEL.NUM_CLASSES))

    model = torch.nn.DataParallel(model).cuda()
    return model
Example No. 6
    def __init__(self, model_name, dropout):
        super().__init__()
        model = get_model(model_name, pretrained='imagenet')
        self.backbone = model.features
        _, n_features, *_ = self.backbone(torch.rand(1, 3, 256, 256)).size()
        self.linear = nn.Sequential(nn.Dropout(dropout),
                                    nn.Linear(n_features, 4),
                                    )
Example No. 7
    def __init__(self):
        super(CNNModel, self).__init__()
        # Load pretrained network as backbone
        pretrained = get_model(opt.classifier, pretrained=True)
        self.backbone = pretrained.features
        self.output = pretrained.output
        self.classifier = nn.Linear(1000, 7)

        del pretrained
Example No. 8
    def load_net(self, back_bone, weight_path):
        net = get_model(back_bone, pretrained=True)
        net.features.final_pool = nn.AdaptiveAvgPool2d(1)
        net.output = nn.Sequential()
        state_dict = torch.load(weight_path)
        net.load_state_dict(state_dict, strict=False)
        net.cuda()
        net.eval()
        return net
Example No. 9
def load_net(load_pth='../../Fancy.1e-2.mobilenet/models/step_100000.pth'):
    net = get_model('mobilenet_w1', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Sequential()

    state_dict = torch.load(load_pth)
    net.load_state_dict(state_dict, strict=False)

    return net
Example No. 10
def GetPretrainedXception(
        path='{}/../weights/xception/best_model.pth.tar'.format(abspath)):
    model = get_model("xception", pretrained=False)
    model = nn.Sequential(*list(model.children())[:-1])
    model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
    model = FCN(model, 2048)
    chpt = torch.load(path)
    model.load_state_dict(chpt['model'])
    return model
Example No. 11
def load_net(load_pth='models/latest.pth'):
    net = get_model('resnet50', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Linear(2048, 228)

    if os.path.isfile(load_pth):
        state_dict = torch.load(load_pth)
        net.load_state_dict(state_dict)

    return net
Example No. 12
def load_net(load_pth='../../../Naive/models/step_99500.pth'):
    net = get_model('resnet50', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Sequential()

    if os.path.isfile(load_pth):
        state_dict = torch.load(load_pth)
        net.load_state_dict(state_dict, strict=False)

    return net
Example No. 13
def load_net(load_pth='models/step_100000.pth'):
    net = get_model('mobilenet_w1', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Linear(1024, 228)

    if os.path.isfile(load_pth):
        state_dict = torch.load(load_pth)
        net.load_state_dict(state_dict)

    return net
Example No. 14
def load_net(load_pth=''):
    net = get_model('mobilenet_w1', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Sequential()

    if os.path.isfile(load_pth):
        state_dict = torch.load(load_pth)
        net.load_state_dict(state_dict, strict=False)

    return net
Example No. 15
def load_model(name, checkpoint_path=None, pretrained=False):
    model = get_model(name, pretrained=pretrained)
    model = nn.Sequential(*list(
        model.children())[:-1])  # Remove original output layer

    model[0].final_pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
    # model[0].final_pool = nn.Sequential(Pooling())

    model = FCN(model, 2048)

    if checkpoint_path is not None:
        model.load_state_dict(torch.load(checkpoint_path))

    return model
Example No. 16
    def __init__(self, model_name):
        super(CNNModel, self).__init__()
        # Load pretrained network as backbone
        pretrained = get_model(model_name, pretrained=True)
        # remove last layer of fc
        self.backbone = pretrained.features
        #       pretrained.output.fc3 = nn.Linear(4096, 7)
        self.output = pretrained.output
        self.classifier = nn.Linear(1000, 7)

        #        nn.init.zeros_(self.classifier.fc3.bias)
        #        nn.init.normal_(self.classifier.fc3.weight, mean=0.0, std=0.02)

        del pretrained
Example No. 17
def build_model(args, device):
    weight_path = args.weight_path

    if args.model_type == 'lrcn':
        assert args.batch_size <= 8
        model = get_model("xception", pretrained=True)
        model = nn.Sequential(*list(model.children())[:-1])
        model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
        freeze_until(model, 'base.0.stage4.unit1.identity_conv.conv.weight')
        lrcn_model = LRCN(model, 300, 1)
        lrcn_model.to(device)
        return lrcn_model

    if args.model == 'resnet50':
        model = BaseCNN('resnet50',
                        pretrained='imagenet',
                        dropout_ratio=args.dropout,
                        GeM_pool=True)
        freeze_until(model, "base_model.layer3.0.conv1.weight")
        print("fine-tuning resnet50")

    elif args.model == 'resnext':
        model = MyResNeXt(checkpoint=weight_path)
        freeze_until(model, "layer4.0.conv1.weight")
        print("fine-tuning resnext")

    elif args.model == 'xception':
        assert args.batch_size <= 16
        model = get_model("xception", pretrained=True)
        model = nn.Sequential(*list(model.children())[:-1])
        model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
        model = FCN(model, 2048)
        freeze_until(model, 'base.0.stage4.unit1.identity_conv.conv.weight')
        print("fine-tuning xception")

    model.to(device)
    return model
Example No. 18
    def __init__(self,
                 encoder_params: dict,
                 num_classes: int,
                 dropout: float = 0.2):
        super().__init__()

        model = model_provider.get_model(**encoder_params)
        self.backbone = model.features
        self.backbone.final_pool = nn.AdaptiveAvgPool2d((1, 1))

        _, n_features, *_ = self.backbone(torch.rand(
            2, 3, 32, 32)).size()  # backbone last layer `out_features`
        self.linear = nn.Sequential(
            collections.OrderedDict([
                ('dropout', nn.Dropout(dropout)),
                ('fc', nn.Linear(n_features, num_classes)),
            ]))
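
    # A possible forward pass for this head (a sketch, not part of the original
    # snippet): the backbone ends in AdaptiveAvgPool2d((1, 1)), so its output is
    # flattened to (N, n_features) before the dropout + fc head built above.
    def forward(self, x):
        x = self.backbone(x)       # (N, n_features, 1, 1)
        x = torch.flatten(x, 1)    # (N, n_features)
        return self.linear(x)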
Example No. 19
def evaluate_root(source_dir, output_dir, epsilon, eval_set, defenses,
                  **kwargs):
    ori_dataset = OriginalDataset(data_dir=source_dir)
    adv_dataset = OriginalDataset(data_dir=output_dir,
                                  transform=build_transforms(defenses))
    adv_dataloader = DataLoader(adv_dataset, batch_size=1)

    if check_adv_validity(ori_dataset, adv_dataset, epsilon):
        print('✓ Adversarial dataset validity passed!')
    else:
        print('❌ Adversarial dataset validity not passed!')
        return

    accuracies = np.empty((len(eval_models[eval_set]), 10))

    for i, model_name in enumerate(eval_models[eval_set]):
        model = get_model(model_name, pretrained=True)
        accuracies[i] = evaluate_single_model(
            model_name.replace('_cifar10', ''), model, adv_dataloader)

    means = np.mean(accuracies, axis=1)

    # Print markdown table
    with (output_dir / f'result-{eval_set}{"-defense" if defenses else ""}.md'
          ).open('w') as f:
        print('Evaluation results: ', file=f)
        print(
            f"| Models     | {' | '.join(s.replace('_cifar10', '') for s in eval_models[eval_set])} |",
            file=f)
        print(f"| ---------- |{'|'.join('------' for _ in eval_models)}|",
              file=f)

        for i, label_name in enumerate(all_labels):
            print(
                f"| {label_name:10s} |{'|'.join(f' {k:.2f} ' for k in accuracies[:, i])}|",
                file=f)

        print(
            f"| Mean       |{'|'.join(f' {k:.2f} ' for k in means)}| {np.mean(means):.4f} | {np.std(means):.4f} |",
            file=f)
        print(f'\nCategory-wise accuracies', file=f)
        print(f"{'|'.join(f' {k:.2f} ' for k in np.mean(accuracies, axis=0))}",
              file=f)
        print(
            f"\n> Overall: mean = {np.mean(means):.4f}, std = {np.std(means):.4f}",
            file=f)
Example No. 20
    def __init__(self) -> None:
        super().__init__()

        self.model = get_model(config.model.arch,
                               pretrained=not args.gen_predict)
        assert config.model.input_size % 32 == 0

        self.model.features[-1] = nn.AdaptiveAvgPool2d(1)
        in_features = self.model.output.in_features

        if config.model.bottleneck_fc is not None:
            self.model.output = nn.Sequential(
                nn.Linear(in_features, config.model.bottleneck_fc),
                nn.Linear(config.model.bottleneck_fc,
                          config.model.num_classes))
        else:
            self.model.output = nn.Linear(in_features,
                                          config.model.num_classes)
Example No. 21
def load_net_opt(load_pth='models/latest.pth', base_lr=1e-1):
    net = get_model('resnet50', pretrained=True)
    net.features.final_pool = nn.AdaptiveAvgPool2d(1)
    net.output = nn.Linear(2048, 228)

    if os.path.isfile(load_pth):
        state_dict = torch.load(load_pth)
        net.load_state_dict(state_dict)

    head_params = list(map(id, net.output.parameters()))
    base_params = filter(lambda p: id(p) not in head_params, net.parameters())

    opt = optim.SGD([
        {'params': base_params, 'lr': base_lr / 10},
        {'params': net.output.parameters(), 'lr': base_lr},
    ], lr=base_lr, momentum=0.9)

    return net, opt
Example No. 22
def build_model(args, device):
    weight_path = args.weight_path
    if args.model == 'resnet50':
        model = BaseCNN('resnet50', pretrained='imagenet', dropout_ratio=args.dropout, GeM_pool=True)
        freeze_until(model, "base_model.layer3.0.conv1.weight")
        print("fine-tuning")

    elif args.model == 'resnext':
        model = MyResNeXt(checkpoint=weight_path)
        freeze_until(model, "layer4.0.conv1.weight")
        print("fine-tuning")

    elif args.model == 'xception':
        model = get_model("xception", pretrained=True)
        model = nn.Sequential(*list(model.children())[:-1])
        model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)))
        model = FCN(model, 2048)
    else:
        raise NotImplementedError

    model.to(device)
    return model
Example No. 23
    def __init__(self,
                 model_name,
                 num_classes,
                 loss='softmax',
                 IN_first=False,
                 pooling_type='avg',
                 **kwargs):
        super().__init__(**kwargs)
        assert self.is_classification(), f"{model_name} model is adapted for classification tasks only"
        self.pooling_type = pooling_type
        self.loss = loss
        assert isinstance(num_classes, int)

        model = get_model(model_name, num_classes=1000, pretrained=self.pretrained)
        assert hasattr(model, 'features') and isinstance(model.features, nn.Sequential)
        self.features = model.features
        self.features = self.features[:-1] # remove pooling, since it can have a fixed size
        if self.loss not in ['am_softmax']:
            self.output_conv = nn.Conv2d(in_channels=model.output.in_channels, out_channels=num_classes, kernel_size=1, stride=1, bias=False)
        else:
            self.output_conv = AngleSimpleLinear(model.output.in_channels, num_classes)
            self.num_features = self.num_head_features = model.output.in_channels

        self.input_IN = nn.InstanceNorm2d(3, affine=True) if IN_first else None
Example No. 24
def create_model(config: Any, pretrained: bool) -> Any:
    dropout = config.model.dropout

    # support the deprecated model
    if config.version == '2b_se_resnext50':
        model = se_resnext50_32x4d(
            pretrained='imagenet' if pretrained else None)
        model.avg_pool = nn.AdaptiveAvgPool2d(1)
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      config.model.num_classes)

        model = torch.nn.DataParallel(model)
        return model

    if not IN_KERNEL:
        model = get_model(config.model.arch, pretrained=pretrained)
    else:
        model = get_model(config.model.arch,
                          pretrained=pretrained,
                          root='../input/pytorchcv-models/')

    if config.model.arch == 'xception':
        model.features[-1].pool = nn.AdaptiveAvgPool2d(1)
    else:
        model.features[-1] = nn.AdaptiveAvgPool2d(1)

    if config.model.arch == 'pnasnet5large':
        if dropout == 0.0:
            model.output = nn.Linear(model.output[-1].in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output[-1].in_features,
                          config.model.num_classes))
    elif config.model.arch == 'xception':
        if dropout < 0.1:
            model.output = nn.Linear(2048, config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout), nn.Linear(2048, config.model.num_classes))
    elif config.model.arch.startswith('inception'):
        if dropout < 0.1:
            model.output = nn.Linear(model.output[-1].in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output[-1].in_features,
                          config.model.num_classes))
    else:
        if dropout < 0.1:
            model.output = nn.Linear(model.output.in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output.in_features, config.model.num_classes))

    model = torch.nn.DataParallel(model)
    return model
Example No. 25
from sklearn.model_selection import train_test_split
import sklearn.metrics

import torch
import torch.nn as nn
import torch.nn.functional as F

import warnings
warnings.filterwarnings("ignore")

from torch.utils.data import Dataset, DataLoader
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

from pytorchcv.model_provider import get_model
model = get_model("xception", pretrained=True)
#model = get_model("resnet18", pretrained=True)
#model = get_model("resnet50", pretrained=True)

model = nn.Sequential(*list(
    model.children())[:-1])  # Remove original output layer
model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
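
# Optional sanity check (a sketch, not part of the original snippet): with the
# output layer removed and the final pool replaced by global average pooling,
# the xception backbone is expected to emit 2048 channels, matching the
# in_f=2048 noted in the Head class below; the 299x299 input size is only an
# assumption here.
with torch.no_grad():
    _feat = model(torch.rand(2, 3, 299, 299))
    assert _feat.shape[1] == 2048  # (2, 2048, 1, 1)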


class Head(torch.nn.Module):
    def __init__(self, in_f, out_f):
        super().__init__()

        self.f = nn.Flatten()

        self.b1 = nn.BatchNorm1d(in_f)  #in_f=2048
Example No. 26
        return self.df.shape[0]


if __name__ == '__main__':
    torch.multiprocessing.set_sharing_strategy('file_system')
    cudnn.benchmark = True

    test_df = pd.read_csv('../data/test.csv', dtype=str)
    print('test_df', test_df.shape)

    test_dataset = ImageDataset(test_df, mode='test')
    test_loader = DataLoader(test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False,
                             num_workers=NUM_WORKERS)

    model = get_model('seresnext50_32x4d', pretrained=True).cuda()
    model.eval()

    results = []
    softmax = nn.Softmax(dim=1)

    with torch.no_grad():
        for i, input_ in enumerate(tqdm(test_loader, disable=IN_KERNEL)):
            output = model(input_.cuda())
            output = softmax(output)
            results.append(output.cpu().numpy())

    with open('imagenet_classes.pkl', 'wb') as f:
        pickle.dump(results, f)
Example No. 27
def classifier(files):
    # Open log file and begin to append
    f = open("deepfakeLog.log", "a+")
    # Sort videos
    test_videos = sorted([x for x in files if x[-4:] == ".mp4"])

    # Check for whether GPU is available.
    gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Set up face detection
    facedet = blazeface.BlazeFace().to(gpu)
    facedet.load_weights(current_path +
                         "\\lib\\blazeface-pytorch\\blazeface.pth")
    facedet.load_anchors(current_path +
                         "\\lib\\blazeface-pytorch\\anchors.npy")
    _ = facedet.train(False)

    from helpers.read_video_1 import VideoReader
    from helpers.face_extract_1 import FaceExtractor

    # Determine how many frames are analyzed per video
    frames_per_video = 20
    # Read in frames
    video_reader = VideoReader()
    video_read_fn = lambda x: video_reader.read_frames(
        x, num_frames=frames_per_video)
    face_extractor = FaceExtractor(video_read_fn, facedet)

    # Size of images
    input_size = 150

    from torchvision.transforms import Normalize

    # Mean/std deviation values from original code
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    normalize_transform = Normalize(mean, std)

    # Resize image
    def isotropically_resize_image(img, size, resample=cv2.INTER_AREA):
        h, w = img.shape[:2]
        if w > h:
            h = h * size // w
            w = size
        else:
            w = w * size // h
            h = size

        resized = cv2.resize(img, (w, h), interpolation=resample)
        return resized

    # Turn image into correct size
    def make_square_image(img):
        h, w = img.shape[:2]
        size = max(h, w)
        t = 0
        b = size - h
        l = 0
        r = size - w
        return cv2.copyMakeBorder(img,
                                  t,
                                  b,
                                  l,
                                  r,
                                  cv2.BORDER_CONSTANT,
                                  value=0)

    # Setup to grab model
    from pytorchcv.model_provider import get_model
    model = get_model("xception", pretrained=False)
    model = nn.Sequential(*list(
        model.children())[:-1])  # Remove original output layer
    model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))

    # Define model
    class Head(torch.nn.Module):
        def __init__(self, in_f, out_f):
            super(Head, self).__init__()

            self.f = nn.Flatten()
            self.l = nn.Linear(in_f, 512)
            self.d = nn.Dropout(0.5)
            self.o = nn.Linear(512, out_f)
            self.b1 = nn.BatchNorm1d(in_f)
            self.b2 = nn.BatchNorm1d(512)
            self.r = nn.ReLU()

        def forward(self, x):
            x = self.f(x)
            x = self.b1(x)
            x = self.d(x)

            x = self.l(x)
            x = self.r(x)
            x = self.b2(x)
            x = self.d(x)

            out = self.o(x)
            return out

    # Define fully conv network
    class FCN(torch.nn.Module):
        def __init__(self, base, in_f):
            super(FCN, self).__init__()
            self.base = base
            self.h1 = Head(in_f, 1)

        def forward(self, x):
            x = self.base(x)
            return self.h1(x)

    net = []
    model = FCN(model, 2048)
    if gpu.type == "cuda":
        model = model.cuda()
    # Load trained model
    model.load_state_dict(
        torch.load(current_path + '\\lib\\model.pth',
                   map_location=torch.device(gpu)))
    net.append(model)

    # Prediction
    def predict_on_video(video_path, batch_size):
        try:
            facesList = []
            # Find the faces for N frames in the video.
            faces = face_extractor.process_video(video_path)

            # Only look at one face per frame.
            face_extractor.keep_only_best_face(faces)

            #facesList = faces

            if len(faces) > 0:
                # NOTE: When running on the CPU, the batch size must be fixed
                # or else memory usage will blow up. (Bug in PyTorch?)
                x = np.zeros((batch_size, input_size, input_size, 3),
                             dtype=np.uint8)

                # If we found any faces, prepare them for the model.
                n = 0
                frameNum = []
                for frame_data in faces:
                    frameNum.append(frame_data["frame_idx"])
                    for face in frame_data["faces"]:
                        # Resize to the model's required input size.
                        # We keep the aspect ratio intact and add zero
                        # padding if necessary.
                        facesList.append(face)
                        resized_face = isotropically_resize_image(
                            face, input_size)
                        resized_face = make_square_image(resized_face)

                        if n < batch_size:
                            x[n] = resized_face
                            n += 1
                        else:
                            print(
                                "WARNING: have %d faces but batch size is %d" %
                                (n, batch_size))

                        # Test time augmentation: horizontal flips.
                        # TODO: not sure yet if this helps or not
                        #x[n] = cv2.flip(resized_face, 1)
                        #n += 1

                if n > 0:
                    x = torch.tensor(x, device=gpu).float()

                    # Preprocess the images.
                    x = x.permute((0, 3, 1, 2))

                    for i in range(len(x)):
                        x[i] = normalize_transform(x[i] / 255.)
                        # x[i] = x[i] / 255.

                    # Make a prediction, then take the average.
                    with torch.no_grad():
                        y_pred = model(x)
                        y_pred = torch.sigmoid(y_pred.squeeze())
                        # Store highest three predictions in list
                        highestThree = sorted(range(len(y_pred)),
                                              key=lambda i: y_pred[i])[-3:]
                        #lowestThree = sorted(range(len(y_pred)), key=lambda i: y_pred[i])[:3]

                        highestFrameNums = []
                        # Save frame numbers of highest 3 predictions
                        for index in highestThree:
                            highestFrameNums.append(frameNum[index])

                        # Log to file that prediction was created
                        time = datetime.now()
                        f.write(
                            time.strftime("%d/%m/%Y %H:%M:%S") +
                            " -- Entered {0} into CNN algorithm\n".format(
                                video_path))
                        f.write("\tPrediction: " +
                                str(y_pred[:n].mean().item()) + "\n\n")
                        # Use the list of frame numbers to save suspicious images
                        save_sus_frames(highestFrameNums, video_path)
                        # Return single prediction mean from prediction on each frame
                        return y_pred[:n].mean().item()

        # Error handling
        except Exception as e:
            exc_type, exc_obj, tb = sys.exc_info()
            lineno = tb.tb_lineno
            time = datetime.now()
            f.write("\n%s -- Prediction error on video %s: %s\n" %
                    (time.strftime("%d/%m/%Y %H:%M:%S"), video_path, str(e)))
            print("Prediction error on video %s: %s %s" %
                  (video_path, lineno, str(e)))
        # Return 0.5 if error (0.5 == Not Sure)
        return 0.5

    # Grabs the three most suspicious frames based on the y predictions and saves them
    def save_sus_frames(highestFrameNums, video_path):
        # Create a video capture instance
        vid = cv2.VideoCapture(video_path)
        video_name = os.path.basename(video_path)
        video_name = os.path.splitext(video_name)[0]
        # Sort highest frame num list
        highestFrameNums = sorted(highestFrameNums)
        total_frames = vid.get(7)
        cd = os.getcwd()
        # Make directory for video
        if not os.path.isdir(video_name):
            os.mkdir('{0}'.format(video_name))
        # Set video to correct frame and save image frame
        for value in highestFrameNums:
            vid.set(1, value)
            ret, frame = vid.read()
            print('{0}/{1}/{1}_frame_{2}.jpg'.format(cd, video_name, value))
            cv2.imwrite(
                '{0}/{1}/{1}_frame_{2}.jpg'.format(cd, video_name, value),
                frame)

    from concurrent.futures import ThreadPoolExecutor

    # Predict on multiple videos
    def predict_on_video_set(videos, num_workers):
        def process_file(i):
            filename = videos[i]
            y_pred = predict_on_video(filename, batch_size=frames_per_video)
            return y_pred

        with ThreadPoolExecutor(max_workers=num_workers) as ex:
            predictions = ex.map(process_file, range(len(videos)))

        return list(predictions)

    # If True, will return a prediction of classification length
    # Ultimately was not able to use this in GUI due to threading difficulty
    speed_test = False

    if speed_test:
        start_time = time.time()
        speedtest_videos = test_videos[:5]
        predictions = predict_on_video_set(speedtest_videos, num_workers=4)
        elapsed = time.time() - start_time
        print("Elapsed %f sec. Average per video: %f sec." %
              (elapsed, elapsed / len(speedtest_videos)))

    model.eval()
    predictions = predict_on_video_set(test_videos, num_workers=4)
    f.close()
    return predictions
Example No. 28
def main():
    global args, best_prec1
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    # create model
    print("=> creating model '{}'".format(args.arch))
    model = get_model(args.arch,
                      in_size=(args.input_size, args.input_size),
                      num_classes=1000)

    if not args.distributed:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    title = 'ImageNet-' + args.arch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            args.checkpoint = os.path.dirname(args.resume)
            logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                            title=title,
                            resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
            'Valid Acc.'
        ])

    cudnn.benchmark = True

    get_train_loader = get_pytorch_train_loader
    get_val_loader = get_pytorch_val_loader

    train_loader, train_loader_len = get_train_loader(
        args.data,
        args.batch_size,
        workers=args.workers,
        input_size=args.input_size)
    val_loader, val_loader_len = get_val_loader(args.data,
                                                args.batch_size,
                                                workers=args.workers,
                                                input_size=args.input_size)

    if args.evaluate:
        from collections import OrderedDict
        if os.path.isfile(args.weight):
            print("=> loading pretrained weight '{}'".format(args.weight))
            source_state = torch.load(args.weight)
            if 'state_dict' in source_state:
                source_state = source_state['state_dict']
            if 'model' in source_state:
                source_state = source_state['model']
            target_state = OrderedDict()
            for k, v in source_state.items():
                #if k.startswith('module.attacker.model.'):
                #    k = k[len('module.attacker.model.'):]
                #else:
                #    continue
                if k[:7] != 'module.':
                    k = 'module.' + k
                target_state[k] = v
            model.load_state_dict(target_state, strict=True)
        else:
            print("=> no weight found at '{}'".format(args.weight))

        validate(val_loader,
                 val_loader_len,
                 model,
                 criterion,
                 adv_eps=args.adv_eps,
                 euclidean_adv=args.euclidean)
        return

    # visualization
    writer = SummaryWriter(os.path.join(args.checkpoint, 'logs'))

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        print(f'\nEpoch: [{epoch + 1} | {args.epochs}]')

        # train for one epoch
        train_loss, train_acc = train(train_loader, train_loader_len, model,
                                      criterion, optimizer, epoch)

        # evaluate on validation set
        val_loss, prec1, prec5, adv_prec1, adv_prec5 = validate(
            val_loader,
            val_loader_len,
            model,
            criterion,
            adv_eps=args.adv_eps,
            euclidean_adv=args.euclidean)

        lr = optimizer.param_groups[0]['lr']

        # append logger file
        logger.append([lr, train_loss, val_loss, train_acc, prec1])

        # tensorboardX
        writer.add_scalar('learning rate', lr, epoch + 1)
        writer.add_scalars('loss', {
            'train loss': train_loss,
            'validation loss': val_loss
        }, epoch + 1)
        writer.add_scalars('accuracy', {
            'train accuracy': train_acc,
            'validation accuracy': prec1
        }, epoch + 1)
        print(
            f'Val results: {prec1:.2f} ; {prec5:.2f} ; {adv_prec1:.2f} ; {adv_prec5:.2f}'
        )

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))
    writer.close()

    print('Best accuracy:')
    print(best_prec1)
Example No. 29
steps = 0
running_loss = 0
print_every = 5000
checkpoint = "./resnet50_pytorch/"

if not os.path.exists(checkpoint):
    os.makedirs(checkpoint)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# model = models.resnext50_32x4d(pretrained=True)
# model.fc = nn.Sequential(nn.Linear(2048, 1),
#                                  nn.Sigmoid())
# model = model.cuda()
from pytorchcv.model_provider import get_model
model = get_model("xception", pretrained=False)
model = nn.Sequential(*list(
    model.children())[:-1])  # Remove original output layer


class Pooling(nn.Module):
    def __init__(self):
        super(Pooling, self).__init__()

        self.p1 = nn.AdaptiveAvgPool2d((1, 1))
        self.p2 = nn.AdaptiveMaxPool2d((1, 1))

    def forward(self, x):
        x1 = self.p1(x)
        x2 = self.p2(x)
        return (x1 + x2) * 0.5
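
# One way to use the hybrid pooling above (a sketch, not part of the original
# snippet): install it as the truncated xception backbone's final pool, in the
# same place other examples on this page plug in nn.AdaptiveAvgPool2d(1).
model[0].final_block.pool = nn.Sequential(Pooling())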
Example No. 30
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path,
                  use_cuda,
                  use_data_parallel=True,
                  net_extra_kwargs=None,
                  load_ignore_extra=False,
                  num_classes=None,
                  in_channels=None,
                  remap_to_cpu=False,
                  remove_module=False):
    """
    Create and initialize model by name.

    Parameters
    ----------
    model_name : str
        Model name.
    use_pretrained : bool
        Whether to use pretrained weights.
    pretrained_model_file_path : str
        Path to file with pretrained weights.
    use_cuda : bool
        Whether to use CUDA.
    use_data_parallel : bool, default True
        Whether to use parallelization.
    net_extra_kwargs : dict, default None
        Extra parameters for model.
    load_ignore_extra : bool, default False
        Whether to ignore extra layers in pretrained model.
    num_classes : int, default None
        Number of classes.
    in_channels : int, default None
        Number of input channels.
    remap_to_cpu : bool, default False
        Whether to remap the model to CPU during loading.
    remove_module : bool, default False
        Whether to remove module from loaded model.

    Returns
    -------
    Module
        Model.
    """
    kwargs = {"pretrained": use_pretrained}
    if num_classes is not None:
        kwargs["num_classes"] = num_classes
    if in_channels is not None:
        kwargs["in_channels"] = in_channels
    if net_extra_kwargs is not None:
        kwargs.update(net_extra_kwargs)

    net = get_model(model_name, **kwargs)

    if pretrained_model_file_path:
        assert (os.path.isfile(pretrained_model_file_path))
        logging.info("Loading model: {}".format(pretrained_model_file_path))
        checkpoint = torch.load(
            pretrained_model_file_path,
            map_location=(None if use_cuda and not remap_to_cpu else "cpu"))
        if (type(checkpoint) == dict) and ("state_dict" in checkpoint):
            checkpoint = checkpoint["state_dict"]

        if load_ignore_extra:
            pretrained_state = checkpoint
            model_dict = net.state_dict()
            pretrained_state = {
                k: v
                for k, v in pretrained_state.items() if k in model_dict
            }
            net.load_state_dict(pretrained_state)
        else:
            if remove_module:
                net_tmp = torch.nn.DataParallel(net)
                net_tmp.load_state_dict(checkpoint)
                net.load_state_dict(net_tmp.module.cpu().state_dict())
            else:
                net.load_state_dict(checkpoint)

    if use_data_parallel and use_cuda:
        net = torch.nn.DataParallel(net)

    if use_cuda:
        net = net.cuda()

    return net
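
# A possible call, sketched with placeholder values ("resnet50" and the empty
# checkpoint path are illustrative, not taken from the original code):
if __name__ == "__main__":
    net = prepare_model(model_name="resnet50",
                        use_pretrained=True,
                        pretrained_model_file_path="",
                        use_cuda=torch.cuda.is_available(),
                        use_data_parallel=False)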