def __init__(self, model="dimo", dataset_index=0):

        # First, create the weighted sampler by analyzing the dataset and ascribing proper class weights

        self.num_inner_nodes = 2
        self.length_of_sequence = 16

        pretrained_model_selection = args.pretrained_model
        #pretrained_model_selection = "./results/viewpoint_models/vgg_viewpoint_experiments/v1/model_epoch_5.pth"
        self.model = DimoAutoSequence(
            pretrained_model=pretrained_model_selection,
            num_inner_nodes=self.num_inner_nodes,
            max_len=self.length_of_sequence)
        self.model = nn.DataParallel(self.model, device_ids=[0, 1, 2, 3]).cuda()

        self.model.load_state_dict(torch.load(args.saved_model))

        mylogger.log("Playtype classifier ready")
        print(self.model)
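
A note on the pattern above: nn.DataParallel registers the wrapped network under a "module." prefix, so a state dict saved from the wrapper only loads cleanly into another wrapped model. A minimal sketch of that round trip, with a hypothetical TinyNet standing in for DimoAutoSequence and a CPU fallback so it runs without four GPUs:

import torch
import torch.nn as nn

class TinyNet(nn.Module):  # stand-in for DimoAutoSequence
    def __init__(self):
        super(TinyNet, self).__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

model = TinyNet()
if torch.cuda.is_available():
    # DataParallel prefixes every parameter key with "module.".
    model = nn.DataParallel(model).cuda()

torch.save(model.state_dict(), "checkpoint.pth")
model.load_state_dict(torch.load("checkpoint.pth"))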
Example #2
    def __init__(self, model="dimo", dataset_index=0):

        # First, create the weighted sampler by analyzing the dataset and ascribing proper class weights

        self.num_inner_nodes = 2
        self.length_of_sequence = 16

        customset_preprocess = CustomMasterDatasetPlays(path=args.dataset_path, subset_type="training", dataset_index=dataset_index, categories=args.categories, retrieve_images=False)
        self.processloader = torch.utils.data.DataLoader(dataset=customset_preprocess, batch_size=1, shuffle=False, num_workers=int(args.num_workers))

        sample_plays = []  # one play-type label per training sample, gathered below

        for batch_idx, (imgs, play_type) in enumerate(self.processloader):
            sample_plays.append(play_type.cpu().numpy()[0][0])

        # Number of play-type categories depends on the chunking scheme.
        cat = 3
        if args.categories == "unchunked":
            cat = 13
        elif args.categories == "two":
            cat = 2
        self.cat = cat
        class_presence = [0 for i in range(self.cat)]

        for play in sample_plays:
            class_presence[play] += 1

        # Convert raw counts to class frequencies.
        for i in range(cat):
            class_presence[i] /= float(len(sample_plays))

        # Per-sample weight is the inverse frequency of that sample's class.
        class_weights = [0 for i in range(len(sample_plays))]
        for i in range(len(sample_plays)):
            class_weights[i] = 1.0 / class_presence[sample_plays[i]]
        # Normalize to a unit sum (WeightedRandomSampler only needs relative
        # weights, so this is cosmetic).
        m = sum(class_weights)
        class_weights = [w * 1.0 / m for w in class_weights]

        customset_train = CustomMasterDatasetPlays(path=args.dataset_path, subset_type="training", dataset_index=dataset_index, categories=args.categories, frame_select=self.length_of_sequence)
        self.trainset = customset_train
        mylogger.log("Loaded {} dataset with {} plays".format(customset_train.subset_type, customset_train.maxlength))
        customset_excite = CustomMasterDatasetPlays(path=args.dataset_path, subset_type="exciting", dataset_index=dataset_index, categories=args.categories, frame_select=self.length_of_sequence)
        self.exciteset = customset_excite
        mylogger.log("Loaded {} dataset with {} plays".format(customset_excite.subset_type, customset_excite.maxlength))

        customset_test = CustomMasterDatasetPlays(path=args.dataset_path, subset_type="testing", dataset_index=dataset_index, categories=args.categories, frame_select=self.length_of_sequence)
        self.testset = customset_test
        mylogger.log("Loaded {} dataset with {} plays".format(customset_test.subset_type, customset_test.maxlength))


        # Finished with sampler weighting
        sampler = torch.utils.data.sampler.WeightedRandomSampler(class_weights, len(self.processloader), replacement=True)
        self.trainloader = torch.utils.data.DataLoader(dataset=customset_train, sampler=sampler, batch_size=int(args.batch_size), num_workers=int(args.num_workers))
        self.train_acc_loader = torch.utils.data.DataLoader(dataset=customset_train, batch_size=int(args.batch_size), shuffle=False, num_workers=int(args.num_workers))
        self.test_acc_loader = torch.utils.data.DataLoader(dataset=customset_test, batch_size=int(args.batch_size), shuffle=False, num_workers=int(args.num_workers))
        self.exciteloader_train = torch.utils.data.DataLoader(dataset=customset_excite, batch_size=1, shuffle=False, num_workers=int(args.num_workers))
        # Note: exciteloader draws shuffled samples from the test split, not the exciting split.
        self.exciteloader = torch.utils.data.DataLoader(dataset=customset_test, batch_size=1, shuffle=True, num_workers=int(args.num_workers))

        self.lr = 0.0001
        mylogger.log("-dotted-line")   
        mylogger.log("Using worker count: {}".format(args.num_workers))
        mylogger.log("Using epoch count: {}".format(args.epochs))
        mylogger.log("Using dataset index: {}".format(args.dataset_index)) 
        mylogger.log("Using model  <{}>".format(args.pretrained_model))
        mylogger.log("Using weight decay: {}".format(args.weight_decay))
        if args.training:
            mylogger.log("Training schedule type <training>")
        mylogger.log("Using categories  <{}>".format(args.categories))
        mylogger.log("Using learning rate  <{}>".format(self.lr))

        pretrained_model_selection = args.pretrained_model
        #pretrained_model_selection = "./results/viewpoint_models/vgg_viewpoint_experiments/v1/model_epoch_5.pth"
        self.model = DimoAutoSequence(pretrained_model=pretrained_model_selection, num_inner_nodes=self.num_inner_nodes, max_len=self.length_of_sequence)
        self.model = nn.DataParallel(self.model, device_ids=[0, 1, 2, 3]).cuda()
        if args.saved_model is not None:
            self.model.load_state_dict(torch.load(args.saved_model))

        self.optimizer = optim.Adam(self.model.parameters(), weight_decay=float(args.weight_decay), lr=self.lr)
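        # NLLLoss expects log-probabilities, so DimoAutoSequence presumably
        # ends in a LogSoftmax layer.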
        self.criterion = nn.NLLLoss().cuda()
        mylogger.log("Playtype classifier ready")
        print(self.model)
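
The core of this example is the inverse-class-frequency weighting fed to WeightedRandomSampler. A minimal self-contained sketch of the same idea, with made-up labels standing in for the play types (TensorDataset is just a stand-in for CustomMasterDatasetPlays):

import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

# Made-up, imbalanced labels standing in for sample_plays.
labels = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 2, 2])
features = torch.randn(len(labels), 4)

# Per-sample weight = 1 / frequency of that sample's class, so every
# class is drawn with roughly equal probability during training.
counts = torch.bincount(labels).float()
freqs = counts / len(labels)
sample_weights = 1.0 / freqs[labels]

sampler = WeightedRandomSampler(sample_weights, num_samples=len(labels), replacement=True)
loader = DataLoader(TensorDataset(features, labels), sampler=sampler, batch_size=4)

for xb, yb in loader:
    print(yb)  # labels now appear roughly uniformly across batches

Note that, as in the code above, normalizing the weights to a unit sum is optional: WeightedRandomSampler only uses their relative magnitudes.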
Example #3
    def __init__(self, model="dimo", dataset_index=0):

        # First, create the weighted sampler by analyzing the dataset and ascribing proper class weights

        print "Initializing playtype classifier"

        final_length = 16
        view = "both"

        first_save = False
        sampler = None
        if first_save:  # recomputing takes over a minute, vs. ~5 seconds to load the cached pickle
            customset_preprocess = MasterPlaysets(
                path=args.dataset_path,
                subset_type="training",
                retrieve_images=False,
                subsampling=None,
                fill=None,
                from_file="gt",
                part="part",
                view=view)  # gt vs est, whole vs. part, 'both' vs '0' vs '1'

            self.processloader = torch.utils.data.DataLoader(
                dataset=customset_preprocess,
                batch_size=1,
                shuffle=False,
                num_workers=int(args.num_workers))

            # Three play-type classes: count each class over the labels, then
            # a second pass assigns every sample the inverse count of its class.
            class_presence = [0, 0, 0]
            for batch_idx, (imgs, play_type) in enumerate(self.processloader):
                class_presence[play_type.cpu().numpy()[0][0]] += 1

            sample_weights = [
                (1.0 / class_presence[play_type.cpu().numpy()[0][0]])
                for (imgs, play_type) in self.processloader
            ]

            sampler = torch.utils.data.sampler.WeightedRandomSampler(
                sample_weights, len(self.processloader), replacement=True)

            # mstd = MasterPlaysets(path = args.dataset_path,subset_type="training", retrieve_images=True, subsampling=None,fill=None, from_file="gt", part="part", view="both") # gt vs est, whole vs. part, 'both' vs '0' vs '1'
            # self.processloader_weighted = torch.utils.data.DataLoader(sampler=sampler,dataset=mstd,batch_size=int(1),shuffle=False,num_workers=int(args.num_workers))
            # customset_preprocess.compute_mean_std(self.processloader_weighted)

            print "Loading datasets"
            customset_train = MasterPlaysets(
                path=args.dataset_path,
                subset_type="training",
                retrieve_images=True,
                subsampling=None,
                fill=None,
                from_file="gt",
                part="part",
                view=view)  # gt vs est, whole vs. part, 'both' vs '0' vs '1'
            customset_test = MasterPlaysets(
                path=args.dataset_path,
                subset_type="testing",
                retrieve_images=True,
                subsampling=None,
                fill=None,
                from_file="gt",
                part="part",
                view=view)  # gt vs est, whole vs. part, 'both' vs '0' vs '1'

            customsets = [
                customset_preprocess, customset_train, customset_test
            ]
            pickle.dump(customsets, open("./junk/customset_v0.pkl", "wb"))
        else:
            customsets = pickle.load(open("./junk/customset_v0.pkl", "rb"))
            customset_preprocess, customset_train, customset_test = customsets
            self.processloader = torch.utils.data.DataLoader(
                dataset=customset_preprocess,
                batch_size=1,
                shuffle=False,
                num_workers=int(args.num_workers))
            class_presence = [0, 0, 0]
            for batch_idx, (imgs, play_type) in enumerate(self.processloader):
                class_presence[play_type.cpu().numpy()[0][0]] += 1
            sample_weights = [
                (1.0 / class_presence[play_type.cpu().numpy()[0][0]])
                for (imgs, play_type) in self.processloader
            ]
            sampler = torch.utils.data.sampler.WeightedRandomSampler(
                sample_weights, len(self.processloader), replacement=True)

        customset_train.proper_length = final_length
        customset_test.proper_length = final_length
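        # Note: the weighted sampler built above is not wired into the active
        # loaders below; the sampler-based variants are left commented out.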

        self.trainloader = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=int(args.batch_size),
            shuffle=False,
            num_workers=int(args.num_workers))
        self.train_acc_loader = torch.utils.data.DataLoader(
            dataset=customset_train,
            batch_size=int(args.batch_size),
            shuffle=False,
            num_workers=int(args.num_workers))
        #self.trainloader = torch.utils.data.DataLoader(sampler=sampler,dataset=customset_train,batch_size=int(args.batch_size),num_workers=int(args.num_workers))
        #self.train_acc_loader = torch.utils.data.DataLoader(sampler=sampler,dataset=customset_train,batch_size=int(args.batch_size),num_workers=int(args.num_workers))
        #self.train_acc_loader = torch.utils.data.DataLoader(dataset=customset_train,batch_size=int(args.batch_size),shuffle=False,num_workers=int(args.num_workers))
        self.test_acc_loader = torch.utils.data.DataLoader(
            dataset=customset_test,
            batch_size=int(args.batch_size),
            shuffle=False,
            num_workers=int(args.num_workers))

        self.lr = 0.0001
        mylogger.log("-dotted-line")
        mylogger.log("Using worker count: {}".format(args.num_workers))
        mylogger.log("Using epoch count: {}".format(args.epochs))
        mylogger.log("Using model  <{}>".format(args.pretrained_model))
        mylogger.log("Using weight decay: {}".format(args.weight_decay))
        if args.training:
            mylogger.log("Training schedule type <training>")
        mylogger.log("Using learning rate  <{}>".format(self.lr))

        pretrained_model_selection = args.pretrained_model
        #pretrained_model_selection = "./results/viewpoint_models/vgg_viewpoint_experiments/v1/model_epoch_5.pth"

        print "Building model"
        self.num_inner_nodes = 2
        self.length_of_sequence = final_length
        self.model = DimoAutoSequence(
            pretrained_model=pretrained_model_selection,
            num_inner_nodes=self.num_inner_nodes,
            max_len=self.length_of_sequence).cuda()
        # The second .cuda() on the DataParallel wrapper is redundant but harmless.
        self.model = nn.DataParallel(self.model, device_ids=[0, 1, 2, 3]).cuda()
        if args.saved_model is not None:
            self.model.load_state_dict(torch.load(args.saved_model))

        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=float(args.weight_decay),
                                    lr=self.lr)
        self.criterion = nn.NLLLoss().cuda()
        mylogger.log("Playtype classifier ready")
        mylogger.log(self.model)
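
Example #3's first_save flag is a simple compute-once cache: the expensive dataset objects are pickled on the first run and reloaded afterwards. A generic sketch of the pattern (build_datasets and the cache path are placeholders, not names from this repo):

import os
import pickle

CACHE_PATH = "./junk/datasets_cache.pkl"  # placeholder path

def build_datasets():
    # Stand-in for the slow MasterPlaysets construction (> 1 minute above).
    return {"train": list(range(100)), "test": list(range(20))}

if os.path.exists(CACHE_PATH):
    with open(CACHE_PATH, "rb") as f:
        datasets = pickle.load(f)
else:
    datasets = build_datasets()
    os.makedirs(os.path.dirname(CACHE_PATH), exist_ok=True)
    with open(CACHE_PATH, "wb") as f:
        pickle.dump(datasets, f)

One caveat: a pickled cache goes stale silently if the underlying files or class definitions change, which is presumably why the filename above carries a v0 version suffix.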