Example No. 1
    def __init__(self, args, vis=None):
        super(CRWTeacherStudent, self).__init__()
        self.args = args

        self.edgedrop_rate = getattr(args, 'dropout', 0)
        self.featdrop_rate = getattr(args, 'featdrop', 0)
        self.temperature = getattr(args, 'temp', getattr(args, 'temperature', 0.07))

        self.encoder = utils.make_encoder(args).to(self.args.device)
        self.infer_dims() # can use same instance attributes for teacher
        self.selfsim_fc = self.make_head(depth=getattr(args, 'head_depth', 0))

        # Teacher Components
        self.teacher = CRWBase(args)
        pretrained_state_dict = torch.load(args.path_to_pretrained)
        self.teacher.load_state_dict(pretrained_state_dict['model'])
        self.teacher.to(self.args.device)

        # Freeze teacher model
        for param in self.teacher.parameters():
            param.requires_grad = False

        self.xent = nn.CrossEntropyLoss(reduction="none")
        self.soft_xent = SoftCrossEntropyLoss()
        self._xent_targets = dict()
        self.alpha = args.alpha_teacher_student
        assert 0 <= self.alpha <= 1, "alpha_teacher_student must be in the interval [0, 1]"

        self.dropout = nn.Dropout(p=self.edgedrop_rate, inplace=False)
        self.featdrop = nn.Dropout(p=self.featdrop_rate, inplace=False)

        self.flip = getattr(args, 'flip', False)
        self.sk_targets = getattr(args, 'sk_targets', False)
        self.vis = vis
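
The constructor above only wires the teacher-student pieces together: a frozen pretrained teacher, the hard cross-entropy `xent`, a `soft_xent`, and the blending weight `alpha` constrained to [0, 1]. The distillation itself would happen in the loss computation, which is not shown here. Below is a minimal sketch of how such a blend could look; the function name `blended_walk_loss`, the temperature scaling, and the exact weighting are illustrative assumptions, not this repository's code.

import torch
import torch.nn.functional as F

def blended_walk_loss(student_logits, teacher_logits, targets, alpha, temperature=0.07):
    # Hard-label cross entropy against the cycle-consistency targets.
    hard = F.cross_entropy(student_logits, targets, reduction="none")
    # Soft cross entropy against the frozen teacher's distribution
    # (teacher runs under no_grad, matching requires_grad=False above).
    with torch.no_grad():
        soft_targets = F.softmax(teacher_logits / temperature, dim=-1)
    soft = -(soft_targets * F.log_softmax(student_logits / temperature, dim=-1)).sum(dim=-1)
    # alpha in [0, 1] trades off teacher imitation against the original objective.
    return alpha * soft + (1.0 - alpha) * hard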
Example No. 2
    def __init__(self, args, vis=None):
        super(CRWBase, self).__init__()
        self.args = args

        self.edgedrop_rate = getattr(args, 'dropout', 0)
        self.featdrop_rate = getattr(args, 'featdrop', 0)
        self.temperature = getattr(args, 'temp', getattr(args, 'temperature', 0.07))

        self.encoder = utils.make_encoder(args).to(self.args.device)
        self.infer_dims()
        self.selfsim_fc = self.make_head(depth=getattr(args, 'head_depth', 0))

        self.xent = nn.CrossEntropyLoss(reduction="none")
        self._xent_targets = dict()

        self.dropout = nn.Dropout(p=self.edgedrop_rate, inplace=False)
        self.featdrop = nn.Dropout(p=self.featdrop_rate, inplace=False)

        self.flip = getattr(args, 'flip', False)
        self.sk_targets = getattr(args, 'sk_targets', False)
        self.vis = vis
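
For reference, these constructors read their configuration from `args` via `getattr` with defaults. The namespace below is an illustrative assumption of the fields CRWBase touches (not the project's actual argument parser), annotated with the fallback values used above.

from types import SimpleNamespace

args = SimpleNamespace(
    device="cuda",      # encoder (and the teacher in Example No. 1) is moved here
    dropout=0.0,        # edge dropout rate (default 0)
    featdrop=0.0,       # feature dropout rate (default 0)
    temp=0.07,          # affinity temperature (default 0.07 via 'temp'/'temperature')
    head_depth=0,       # depth of the selfsim projection head (default 0)
    flip=False,         # optional flag, default False
    sk_targets=False,   # optional flag, default False
)
# model = CRWBase(args)  # also requires the surrounding module's utils/encoder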
Example No. 3
    def __init__(self, args, vis=None):
        super(CRW, self).__init__()
        self.args = args

        self.edgedrop_rate = getattr(args, "dropout", 0)
        self.featdrop_rate = getattr(args, "featdrop", 0)
        self.temperature = getattr(args, "temp", getattr(args, "temperature", 0.07))

        self.encoder = utils.make_encoder(args).to(self.args.device)
        self.infer_dims()
        self.selfsim_fc = self.make_head(depth=getattr(args, "head_depth", 0))
        self.zero_softmax = ZeroSoftmax()

        self.xent = nn.CrossEntropyLoss(reduction="none")
        self._xent_targets = dict()

        self.dropout = nn.Dropout(p=self.edgedrop_rate, inplace=False)
        self.featdrop = nn.Dropout(p=self.featdrop_rate, inplace=False)

        self.flip = getattr(args, "flip", False)
        self.sk_targets = getattr(args, "sk_targets", False)
        self.vis = vis

        self.dilation_kernel = utils.make_dilation_kernel(args) if args.dilate_superpixels else None
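
`ZeroSoftmax` and `utils.make_dilation_kernel` are defined elsewhere in the project and are not shown in this snippet. As a rough illustration of the second piece only: a dilation kernel applied to binary superpixel masks can be realized as a convolution followed by a threshold. Everything below (kernel size, mask shape, function name) is an assumption for illustration, not the repository's implementation.

import torch
import torch.nn.functional as F

def dilate_masks(masks, kernel_size=3):
    # masks: (N, 1, H, W) binary superpixel masks.
    kernel = torch.ones(1, 1, kernel_size, kernel_size, dtype=masks.dtype)
    pad = kernel_size // 2
    hits = F.conv2d(masks, kernel, padding=pad)   # count mask pixels in each neighbourhood
    return (hits > 0).to(masks.dtype)             # any hit -> pixel joins the dilated mask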
Example No. 4
    def __init__(self, args=None, vis=None):
        super(TimeCycle, self).__init__()
        
        self.args = args

        if args is not None:
            self.kldv_coef = getattr(args, 'kldv_coef', 0)
            self.xent_coef = getattr(args, 'xent_coef', 0)
            self.zero_diagonal = getattr(args, 'zero_diagonal', 0)
            self.dropout_rate = getattr(args, 'dropout', 0)
            self.featdrop_rate = getattr(args, 'featdrop', 0)
            self.model_type = getattr(args, 'model_type', 'scratch')
            self.temperature = getattr(args, 'temp', getattr(args, 'temperature', 1))
            self.shuffle = getattr(args, 'shuffle', 0)
            self.xent_weight = getattr(args, 'xent_weight', False)
        else:
            self.kldv_coef = 0
            self.xent_coef = 0
            self.long_coef = 1
            self.skip_coef = 0

            # self.sk_align = False
            # self.sk_targets = True
            
            self.zero_diagonal = 0
            self.dropout_rate = 0
            self.featdrop_rate = 0
            self.model_type = 'scratch'
            self.temperature = 1
            self.shuffle = False
            self.xent_weight = False

        print('Model temp:', self.temperature)
        self.encoder = utils.make_encoder(args).cuda()

        self.infer_dims()

        self.selfsim_fc = self.make_head(depth=self.garg('head_depth', 0))
        self.selfsim_head = self.make_conv3d_head(depth=1)
        self.context_head = self.make_conv3d_head(depth=1)

        # self.selfsim_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])
        # self.context_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])

        import resnet3d, resnet2d
        if self.garg('cal_coef', 0) > 0:
            self.stack_encoder = utils.make_stack_encoder(self.enc_hid_dim)
            # self.aff_encoder = resnet2d.Bottleneck(1, 128,)

        # # assuming no fc pre-training
        # for m in self.modules():
        #     if isinstance(m, nn.Linear):
        #         m.weight.data.normal_(0, 0.01)
        #         m.bias.data.zero_()

        self.edge = getattr(args, 'edgefunc', 'softmax')

        # self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
        self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
        self.xent = torch.nn.CrossEntropyLoss(reduction="none")

        self.target_temp = 1

        self._xent_targets = {}
        self._kldv_targets = {}
        
        if self.garg('restrict', 0) > 0:
            self.restrict = utils.RestrictAttention(int(args.restrict))
        else:
            self.restrict = None

        self.dropout = torch.nn.Dropout(p=self.dropout_rate, inplace=False)
        self.featdrop = torch.nn.Dropout(p=self.featdrop_rate, inplace=False)

        self.viz = visdom.Visdom(port=8095, env='%s_%s' % (getattr(args, 'name', 'test'), '')) #int(time.time())))
        self.viz.close()

        if not self.viz.check_connection():
            self.viz = None

        if vis is not None:
            self._viz = vis
    
        p_sz, stride = 64, 32
        self.k_patch = nn.Sequential(
            K.RandomResizedCrop(size=(p_sz, p_sz), scale=(0.7, 0.9), ratio=(0.7, 1.3))
        )

        mmm, sss = torch.Tensor([0.485, 0.456, 0.406]), torch.Tensor([0.229, 0.224, 0.225])

        self.k_frame = nn.Sequential(
            # kornia.color.Normalize(mean=-mmm/sss, std=1/sss),
            # K.ColorJitter(0.1, 0.1, 0.1, 0),
            # K.RandomResizedCrop(size=(256, 256), scale=(0.8, 0.9), ratio=(0.7, 1.3)),
            # kornia.color.Normalize(mean=mmm, std=sss)
        )
        
        # self.k_frame_same = nn.Sequential(
        #     K.RandomResizedCrop(size=(256, 256), scale=(0.8, 1.0), same_on_batch=True)
        # )
        # self.k_frame_same = None
        
        self.k_frame_same = nn.Sequential(
            kornia.geometry.transform.Resize(256 + 20),
            K.RandomHorizontalFlip(same_on_batch=True),
            K.RandomCrop((256, 256), same_on_batch=True),
        )

        self.unfold = torch.nn.Unfold((p_sz, p_sz), dilation=1, padding=0, stride=(stride, stride))

        self.ent_stats = utils.RunningStats(1000)
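
The patch pipeline at the end of this constructor (`p_sz, stride = 64, 32` together with `torch.nn.Unfold`) slices each frame into overlapping 64x64 patches on a stride-32 grid. A standalone sketch of just that step, with shapes worked out for an assumed 256x256 input:

import torch

p_sz, stride = 64, 32
unfold = torch.nn.Unfold((p_sz, p_sz), dilation=1, padding=0, stride=(stride, stride))

frames = torch.randn(2, 3, 256, 256)                 # (B, C, H, W); H = W = 256 is an assumption
patches = unfold(frames)                             # (B, C * 64 * 64, L) with L = 7 * 7 = 49 positions
B, _, L = patches.shape
patches = patches.transpose(1, 2).reshape(B, L, 3, p_sz, p_sz)
print(patches.shape)                                 # torch.Size([2, 49, 3, 64, 64])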