    def __init__(self, model="EB11", path=None, viewpoints=3, interval_size=32,
                 interval_samples_per_game=None, splitting="whole",
                 overlap="consecutive"):
        
        # Use a dataset-wide default only when no per-game sample count was given.
        if interval_samples_per_game is None:
            interval_samples_per_game = 20000 // interval_size
        self.interval_size = interval_size
        self.model = model
        customset_test = CustomDatasetViewpointIntervals(
            path=path,
            subset_type="testing",
            viewpoints=viewpoints,
            splitting=splitting,
            overlap=overlap,
            interval_samples_per_game=interval_samples_per_game,
            interval_size=interval_size,
            only_extract=True)

        self.testloader_acc = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.num_workers)
        
        # print(len(self.testloader_acc))  # 9128


        if (model == "VGG"):
            self.model = VGG_viewpoints(num_classes=viewpoints).cuda()
            self.model.soft = nn.LogSoftmax()
        elif (model == "ED"):
            self.model_VGG = VGG_viewpoints(num_classes=viewpoints, mode="features").cuda()
            self.model_VGG.soft = nn.LogSoftmax()
            self.model_VGG.load_state_dict(torch.load("/scratch/datasets/NFLsegment/experiments/viewpoint_framewise/models/model_epoch_10.pth"))
            # Drop the last three classifier layers so the VGG backbone emits
            # frame features for the encoder-decoder instead of class scores.
            mod = list(self.model_VGG.classifier.children())[:-3]
            self.model_VGG.classifier = torch.nn.Sequential(*mod)
            self.model_ED = EncoderDecoderViewpoints(max_len=interval_size).cuda() # not on multiple gpus, since it needs to not distribute the interval images
            self.model_VGG = nn.DataParallel(self.model_VGG,device_ids=[0,1,2,3]).cuda() # this is per image, so we can distribute over 4 gpus
            self.model_ED.load_state_dict(torch.load("/scratch/datasets/NFLsegment/experiments/viewpoint_intervals/models/model_epoch_2_32_ed.pth"))
            self.model_VGG.load_state_dict(torch.load("/scratch/datasets/NFLsegment/experiments/viewpoint_intervals/models/model_epoch_2_32_vgg.pth"))
        elif (model == "EB11"):
            self.model_VGG = VGG_viewpoints(num_classes=viewpoints, mode="features")
            self.model_VGG.soft = nn.LogSoftmax()
            self.model_VGG.load_state_dict(torch.load("/scratch/datasets/NFLsegment/experiments/viewpoint_framewise_VGG11/model_epoch_5.pth"))
        elif (model == "EB"):
            self.model_ED = EncoderDecoderViewpoints(max_len=interval_size).cuda()
            #self.model_ED.load_state_dict(torch.load("/scratch/datasets/NFLsegment/experiments/viewpoint_intervals/models/model_epoch_2_32_ed.pth"))
            self.model_ED.init_VGG(model="/scratch/datasets/NFLsegment/experiments/viewpoint_framewise/models/model_epoch_10.pth")

        if model != "EB11":
            self.optimizer = optim.Adam(list(self.model_VGG.parameters()) + list(self.model_ED.parameters()), weight_decay=float(args.weight_decay), lr=0.0001)
            self.criterion = nn.NLLLoss().cuda()
            mylogger.log(self.model_VGG)

        #mylogger.log(self.model_ED)
        print("EBBBB")
Example 2
class viewpoint_classifier():
    def __init__(self, model, dataset_index=0, path=None, viewpoints=3):

        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG(num_classes=2)
        elif (model == "resnet"):
            self.model = ResNet()
        elif (model == "ED"):
            self.model_ED = EncoderDecoderViewpoints()

        self.model_vgg = VGG_viewpoints(num_classes=3).cuda()
        self.model_ed = EncoderDecoderViewpoints().cuda()

        self.model_vgg = nn.DataParallel(self.model_vgg,
                                         device_ids=[0, 1, 2, 3]).cuda()
        self.model_vgg.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_prepared/model_epoch_2.pth"
            ))
        # Drop the last three classifier layers and expose the truncated stack as
        # `new_classifier`, so the VGG yields frame features for the encoder-decoder.
        mod = list(self.model_vgg.module.classifier.children())[:-3]
        new_classifier = torch.nn.Sequential(*mod)
        self.model_vgg.module.new_classifier = new_classifier
        print(self.model_vgg)

        # Trained ED loading, comment to disable

        self.model_ed.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_disjointed/model_ed_epoch_20.pth"
            ))

        print(self.model_ed)
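        # Data-flow note (an assumption based on the truncation above): the per-frame
        # outputs of `new_classifier` are meant to be stacked along the time axis and
        # fed to `self.model_ed`, which decodes one viewpoint label per frame.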
Example 3
    def __init__(self, dataset_index=0, path=None):

        self.sampler = self.weighted_sampling(dataset_index=dataset_index,
                                              path=path)

        customset_train = CustomDatasetViewpoint(path=path,
                                                 subset_type="training",
                                                 dataset_index=dataset_index,
                                                 viewpoints=3)
        customset_test = CustomDatasetViewpoint(path=path,
                                                subset_type="testing",
                                                dataset_index=dataset_index,
                                                viewpoints=3)

        self.trainloader = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            sampler=self.sampler,
            batch_size=args.batch_size,
            num_workers=args.num_workers)  # no shuffle: it is incompatible with an explicit sampler
        self.trainloader_acc = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers)

        self.trainloader_seg = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            batch_size=256,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_seg_encoder = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            batch_size=1024,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_acc_seg = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=96,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_acc_seg_encoder = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=96 * 32,
            shuffle=False,
            num_workers=args.num_workers)
        self.testloader_acc_seg = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=256,
            shuffle=False,
            num_workers=args.num_workers)
        self.testloader_acc_seg_encoder = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=1024,
            shuffle=False,
            num_workers=args.num_workers)

        self.model_vgg = VGG_viewpoints(num_classes=3).cuda()
        self.model_ed = EncoderDecoderViewpoints().cuda()

        # Trained vgg loading, comment to disable
        self.model_vgg = nn.DataParallel(self.model_vgg,
                                         device_ids=[0, 1, 2, 3]).cuda()
        self.model_vgg.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_prepared/model_epoch_2.pth"
            ))
        #self.model_vgg = self.model_vgg.module
        #self.model_vgg.cuda()
        mod = list(self.model_vgg.module.classifier.children())[:-3]
        new_classifier = torch.nn.Sequential(*mod)
        self.model_vgg.module.new_classifier = new_classifier
        print(self.model_vgg)
        # Trained ED loading, comment to disable

        self.model_ed.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_disjointed/model_ed_epoch_20.pth"
            ))
        print(self.model_ed)
        '''
        if args.pretrained_model != None:
            if args.pretrained_same_architecture:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,1000))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,2))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.soft = nn.LogSoftmax()
                else:
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512,2)
        '''

        self.optimizer_vgg = optim.Adam(self.model_vgg.parameters(),
                                        weight_decay=float(args.weight_decay),
                                        lr=0.0001)
        self.optimizer_ED = optim.Adam(self.model_ed.parameters(),
                                       weight_decay=float(args.weight_decay),
                                       lr=0.0001)
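        # A minimal (hypothetical) joint update using the two optimizers above; the
        # variable names, shapes and call signatures here are assumptions for
        # illustration, not code from the original script:
        #
        #   self.optimizer_vgg.zero_grad()
        #   self.optimizer_ED.zero_grad()
        #   feats = self.model_vgg(imgs)          # per-frame features from the truncated VGG
        #   log_probs = self.model_ed(feats)      # per-frame viewpoint log-probabilities
        #   loss = nn.NLLLoss()(log_probs, labels)
        #   loss.backward()
        #   self.optimizer_vgg.step()
        #   self.optimizer_ED.step()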
Example 4
class viewpoint_classifier():
    def weighted_sampling(self, dataset_index=0, path=None):

        if not os.path.isfile(
                "./results/intermediate_data/sampling_weights_three_viewpoints.p"
        ):
            customset_preprocess = CustomDatasetViewpoint(
                path=args.dataset_path,
                subset_type="training",
                dataset_index=dataset_index,
                retrieve_images=False,
                viewpoints=3)
            self.processloader = torch.utils.data.DataLoader(
                dataset=customset_preprocess,
                batch_size=1,
                shuffle=False,
                num_workers=int(args.num_workers))

            sample_views = []  # one viewpoint label per training sample

            for batch_idx, (imgs, label) in enumerate(self.processloader):
                sample_views.append(label.numpy()[0][0])

            class_presence = [0, 0, 0]

            for view in sample_views:
                class_presence[view] += 1

            for i in range(len(class_presence)):
                class_presence[i] /= len(sample_views) * 1.0

            # One weight per sample: the inverse of its class frequency, scaled down
            # to keep the weights small.
            class_weights = [1.0 / class_presence[view] for view in sample_views]
            m = 2.0 * len(sample_views)
            class_weights = [w / m for w in class_weights]

            # Finished with sampler weighting
            sampler = torch.utils.data.sampler.WeightedRandomSampler(
                class_weights, len(self.processloader), replacement=True)
            pickle.dump(
                sampler,
                open(
                    "./results/intermediate_data/sampling_weights_three_viewpoints.p",
                    "wb"))
        else:
            sampler = pickle.load(
                open(
                    "./results/intermediate_data/sampling_weights_three_viewpoints.p",
                    "rb"))
        return sampler
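        # Worked example of the weighting above (hypothetical counts, not measured on
        # the dataset): with 600 / 300 / 100 samples of the three viewpoints, the class
        # frequencies are 0.6 / 0.3 / 0.1, so the unnormalised per-sample weights are
        # roughly 1.67 / 3.33 / 10.0; WeightedRandomSampler then draws the rare
        # viewpoint about as often as the common ones during training.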

    def __init__(self, dataset_index=0, path=None):

        self.sampler = self.weighted_sampling(dataset_index=dataset_index,
                                              path=path)

        customset_train = CustomDatasetViewpoint(path=path,
                                                 subset_type="training",
                                                 dataset_index=dataset_index,
                                                 viewpoints=3)
        customset_test = CustomDatasetViewpoint(path=path,
                                                subset_type="testing",
                                                dataset_index=dataset_index,
                                                viewpoints=3)

        self.trainloader = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            sampler=self.sampler,
            batch_size=args.batch_size,
            num_workers=args.num_workers)  # no shuffle: it is incompatible with an explicit sampler
        self.trainloader_acc = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers)

        self.trainloader_seg = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            batch_size=256,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_seg_encoder = torch.utils.data.DataLoader(
            pin_memory=True,
            dataset=customset_train,
            batch_size=1024,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_acc_seg = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=96,
            shuffle=False,
            num_workers=args.num_workers)
        self.trainloader_acc_seg_encoder = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=96 * 32,
            shuffle=False,
            num_workers=args.num_workers)
        self.testloader_acc_seg = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=256,
            shuffle=False,
            num_workers=args.num_workers)
        self.testloader_acc_seg_encoder = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=1024,
            shuffle=False,
            num_workers=args.num_workers)

        self.model_vgg = VGG_viewpoints(num_classes=3).cuda()
        self.model_ed = EncoderDecoderViewpoints().cuda()

        # Trained vgg loading, comment to disable
        self.model_vgg = nn.DataParallel(self.model_vgg,
                                         device_ids=[0, 1, 2, 3]).cuda()
        self.model_vgg.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_prepared/model_epoch_2.pth"
            ))
        #self.model_vgg = self.model_vgg.module
        #self.model_vgg.cuda()
        mod = list(self.model_vgg.module.classifier.children())[:-3]
        new_classifier = torch.nn.Sequential(*mod)
        self.model_vgg.module.new_classifier = new_classifier
        print(self.model_vgg)
        # Trained ED loading, comment to disable

        self.model_ed.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_disjointed/model_ed_epoch_20.pth"
            ))
        print(self.model_ed)
        '''
        if args.pretrained_model != None:
            if args.pretrained_same_architecture:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,1000))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,2))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.soft = nn.LogSoftmax()
                else:
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512,2)
        '''

        self.optimizer_vgg = optim.Adam(self.model_vgg.parameters(),
                                        weight_decay=float(args.weight_decay),
                                        lr=0.0001)
        self.optimizer_ED = optim.Adam(self.model_ed.parameters(),
                                       weight_decay=float(args.weight_decay),
                                       lr=0.0001)
Example 5
    def __init__(self, model="VGG", path=None, viewpoints=3):

        self.sampler = self.weighted_sampling(path=path, viewpoints=viewpoints)

        customset_train = CustomDatasetViewpointFramewise(
            path=path, subset_type="training", viewpoints=viewpoints)
        customset_test = CustomDatasetViewpointFramewise(path=path,
                                                         subset_type="testing",
                                                         viewpoints=viewpoints)

        self.trainloader = torch.utils.data.DataLoader(
            dataset=customset_train,
            sampler=self.sampler,
            batch_size=args.batch_size,
            num_workers=args.num_workers)  # no shuffle: it is incompatible with an explicit sampler
        self.trainloader_acc = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.num_workers)

        # print(len(self.trainloader))      # 434
        # print(len(self.trainloader_acc))  # 434
        # print(len(self.testloader_acc))   # 405, no scoreboard augmentation

        if (model == "VGG"):
            self.model = VGG_viewpoints(num_classes=viewpoints).cuda()
            self.model.soft = nn.LogSoftmax()
            if args.pretrained_same_architecture is not None:
                self.model.load_state_dict(
                    torch.load(args.pretrained_same_architecture))
        elif (model == "ED"):
            self.model = EncoderDecoderViewpoints()

        if args.pretrained_model is not None:
            if args.pretrained_same_architecture is not None:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096, 1000))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.load_state_dict(
                        torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096, 2))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.soft = nn.LogSoftmax()
                else:
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(
                        torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512, 2)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=float(args.weight_decay),
                                    lr=0.0001)
        self.criterion = nn.NLLLoss().cuda()
        mylogger.log(self.model)
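        # A minimal (hypothetical) training epoch over the weighted loader; this is an
        # illustrative sketch under the assumption that the dataset yields (image, label)
        # pairs, not code taken from the original training script:
        #
        #   for imgs, labels in self.trainloader:
        #       imgs, labels = imgs.cuda(), labels.cuda().view(-1)
        #       self.optimizer.zero_grad()
        #       loss = self.criterion(self.model(imgs), labels)
        #       loss.backward()
        #       self.optimizer.step()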