Example #1
    def __init__(self, dropout=0.2, logstd1=-1, logstd2=-2, pi=0.5):
        super(GestureSpotting, self).__init__()
        type = bl.ModelType.MC_DROP

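        # Shared settings for the Bayesian layers: logstd1/logstd2 with mixing
        # weight pi (presumably a two-component scale-mixture prior) plus the
        # dropout rate; last_linear_args below disables dropout for the head.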
        linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": 0
        }
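        # Normalization statistics (mean, std) computed on the training data
        # and stored below as self.mean / self.std.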
        stats = np.load("train_stats.npy")
        #Embedding layers
        self.bfc1 = bl.Linear(54, 32, **linear_args)
        self.bfc2 = bl.Linear(32, 32, **linear_args)
        self.fc1 = nn.Sequential(self.bfc1, nn.ReLU())
        self.fc2 = nn.Sequential(self.bfc2, nn.ReLU())

        #attributes
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.hidden_state = None
        self.mean, self.std = stats[0], stats[1] + 1e-18

        #model parameters
        self.input_size = 32
        self.output_size = 2
        self.hidden_dim = 64
        self.n_layers = 1

        #Recurrent Bayesian Layer
        self.lstm = bl.LSTM(input_size=self.input_size,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True,
                            **rnn_args)

        # Feature-combination layer (Bayesian linear + ReLU)
        self.fc_combine = bl.Linear(self.input_size, self.input_size,
                                    **linear_args)
        self.combine = nn.Sequential(self.fc_combine, nn.ReLU())

        # Classifier layer
        self.fc = bl.Linear(self.hidden_dim, self.output_size,
                            **last_linear_args)
Example #2
    def __init__(self, output_size, hidden_dim, n_layers, mode="mc",
                 dropout=0.5, logstd1=-1, logstd2=-2, pi=0.5):
        super(IterStarRGBHandModel, self).__init__()
        #type = bl.ModelType.VAR_DROP_B_ADAP
        mode = mode.lower()
        if mode == "mc": self.type =  bl.ModelType.MC_DROP
        elif mode == "vd": self.type = bl.ModelType.VAR_DROP_B_ADAP
        elif mode == "bbb": self.type = bl.ModelType.BBB
        else: self.type = bl.ModelType.DET
        
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": self.type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": bl.ModelType.DET,
            "dropout": 0
        }

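        # Two ImageNet-pretrained ResNet-50 backbones (classifier removed), one
        # for the movement stream and one for the hand crops; SqueezeExtractor
        # presumably flattens the pooled 2048-d features and the strided Conv1d
        # reduces them to 1023 dims, matching self.embeding_size below.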
        resnet_mov = models.resnet50(pretrained=True)
        self.mov = nn.Sequential(*(list(resnet_mov.children())[:-1]),
                                 SqueezeExtractor(), nn.Conv1d(1, 1, 3, stride=2), nn.ReLU())
        resnet_hand = models.resnet50(pretrained=True)
        self.hand = nn.Sequential(*(list(resnet_hand.children())[:-1]),
                                  SqueezeExtractor(), nn.Conv1d(1, 1, 3, stride=2), nn.ReLU())

        #Embedding
        self.embeding_size = 1023
        self.soft = SoftAttention(1023, 128)

        # nn.MaxPool1d(2, stride=2)
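        # For the two-class (spotting) case, class 0 is down-weighted,
        # presumably to compensate for class imbalance.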
        weights = [1./1.5, 1.0]

        class_weights = torch.FloatTensor(weights).cuda()
        self.loss_fn = nn.NLLLoss(
            weight=class_weights) if output_size == 2 else nn.NLLLoss()
        self.output_size = output_size #12
        self.hidden_dim = hidden_dim #256
        self.n_layers = n_layers #2
        self.sharpen = False
        self.lstm = bl.LSTM(input_size=self.embeding_size,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True,
                            **rnn_args)

        self.dropout = dropout
        self.fc = bl.Linear(self.hidden_dim, self.output_size, **last_linear_args)
        self.dp_training = True
        self._baysian_layers = []
        self.hidden = None
        self.td = TimeDistributed()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # self.device = torch.device( 'cpu')
        self.frozen = False
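    # Constructor of the SoftAttention module used above: a small Bayesian MLP
    # that maps each feature vector to a scalar attention score.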
    def __init__(self, input_size=512, hidden_size=128, type="det"):
        super(SoftAttention, self).__init__()
        if type == "mc": self.type = bl.ModelType.MC_DROP
        elif type == "vd": self.type = bl.ModelType.VAR_DROP_B_ADAP
        elif type == "bbb": self.type = bl.ModelType.BBB
        else: self.type = bl.ModelType.DET
        args = {
            "mu": 0,
            "logstd1": 1.0,
            "logstd2": 1.0,
            "pi": 0.5,
            "type": self.type,
            "dropout": 0
        }
        self.softatt = nn.Sequential(bl.Linear(input_size, hidden_size, **args),
                                     nn.ReLU(),
                                     bl.Linear(hidden_size, 1, **args),
                                     )
        self.weights = None
Example #4
    def __init__(self,
                 data_type,
                 input_size,
                 output_size,
                 hidden_dim,
                 n_layers,
                 mode="mc",
                 dropout=0.5,
                 logstd1=-1,
                 logstd2=-2,
                 pi=0.5):
        super(RTGR, self).__init__()
        #type = bl.ModelType.VAR_DROP_B_ADAP
        if mode == "mc": type = bl.ModelType.MC_DROP
        elif mode == "vd": type = bl.ModelType.VAR_DROP_B_ADAP
        elif mode == "bbb": type = bl.ModelType.BBB
        else: type = bl.ModelType.DET

        linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": 0
        }
        self.data_type = data_type

        #Embedding
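        # Skeleton features are embedded by a two-layer MLP
        # (input_size -> 32 -> 16) before the recurrent layer.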

        self.input_size = input_size
        self.embeding_size = 16
        self.bfc1 = bl.Linear(self.input_size, 32, **linear_args)
        self.bfc2 = bl.Linear(32, self.embeding_size, **linear_args)
        self.fc1 = nn.Sequential(self.bfc1, nn.ReLU())
        self.fc2 = nn.Sequential(self.bfc2, nn.ReLU())
        #weights = [1./21, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,1.0, 1.0, 1.0, 1.0, 1.0]
        weights = [1. / 1.5, 1.0]

        class_weights = torch.FloatTensor(weights).cuda()
        self.loss_fn = nn.NLLLoss(
            weight=class_weights) if output_size == 2 else nn.NLLLoss()
        self.output_size = output_size  #12
        self.hidden_dim = hidden_dim  #256
        self.n_layers = n_layers  #2
        self.sharpen = False
        self.lstm = bl.LSTM(input_size=self.embeding_size,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True,
                            **rnn_args)

        self.dropout = dropout

        self.fc = bl.Linear(self.hidden_dim, self.output_size,
                            **last_linear_args)
        self.dp_training = True
        self._baysian_layers = []
    def __init__(self, data_type, input_size, output_size, hidden_dim, n_layers,
                 mode="mc", dropout=0.5, logstd1=-1, logstd2=-2, pi=0.5):
        super(RTGR, self).__init__()
        #type = bl.ModelType.VAR_DROP_B_ADAP
        if mode == "mc": type =  bl.ModelType.MC_DROP
        elif mode == "vd": type = bl.ModelType.VAR_DROP_B_ADAP
        elif mode == "bbb": type = bl.ModelType.BBB
        else: type = bl.ModelType.DET
        
        linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": bl.ModelType.DET,
            "dropout": dropout
        }
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": 0
        }
        self.data_type = data_type

        #Embedding
        self.skl_input_size = input_size
        self.skl_emb_size = 256

        self.hand_input_size = 512
        self.hand_emb_size = 256

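        # Two input streams: skeleton features embedded input_size -> 256 -> 256
        # and 512-d hand features embedded to 256, plus three SoftAttention
        # modules (one over the 512-d hand features, two over 256-d embeddings).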
        bfc1 = bl.Linear(self.skl_input_size, self.skl_emb_size, **linear_args)
        bfc2 = bl.Linear(self.skl_emb_size, self.skl_emb_size, **linear_args)

        bfc3 = bl.Linear(self.hand_input_size, self.hand_emb_size, **linear_args)
        # self.bfc4 = bl.Linear(512, self.hand_emb_size, **linear_args)
        


        self.hand_embedding = nn.Sequential(bfc3, nn.ReLU())
        self.skl_embedding = nn.Sequential(bfc1, nn.ReLU(), bfc2, nn.ReLU())
        self.soft_att = SoftAttention(input_size = self.hand_input_size)
        self.soft_att_info = SoftAttention(input_size = self.hand_emb_size)
        self.soft_att_spt = SoftAttention(input_size = self.hand_emb_size)


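        # ImageNet-pretrained ResNet-34 with the final FC removed; its 512-d
        # pooled output presumably feeds the hand embedding above.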
        model = models.resnet34(pretrained=True)
        features = list(model.children())

        self.hand_features = nn.Sequential(*features[:-1], SqueezeExtractor())

        weights = [1.0, 1.0]
        class_weights = torch.FloatTensor(weights).cuda()
        self.loss_fn = nn.NLLLoss(
            weight=class_weights) if output_size == 2 else nn.NLLLoss()
        self.output_size = output_size
        self.hidden_dim = hidden_dim 
        self.n_layers = n_layers 
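        # Two recurrent heads: self.rnn / self.bfc_output for 15-way gesture
        # classification, self.rnn_spt / self.bfc_output_spt for binary spotting.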
        self.rnn = bl.LSTM(input_size=self.skl_emb_size,
                           hidden_size=self.hidden_dim,
                           num_layers=self.n_layers,
                           batch_first=True,
                           **rnn_args)
        self.bfc_output = bl.Linear(self.hidden_dim, 15, **last_linear_args)


        self.rnn_spt = bl.LSTM(input_size=self.skl_emb_size,
                               hidden_size=self.hidden_dim,
                               num_layers=self.n_layers,
                               batch_first=True,
                               **rnn_args)
        self.bfc_output_spt = bl.Linear(self.hidden_dim, 2, **last_linear_args)
        
        self.dropout = dropout
        self._baysian_layers = []
        self.hidden = None
    def __init__(self,
                 data_type,
                 input_size,
                 output_size,
                 hidden_dim,
                 n_layers,
                 mode="mc",
                 dropout=0.5,
                 logstd1=-1,
                 logstd2=-2,
                 pi=0.5):
        super(RTGR, self).__init__()
        #type = bl.ModelType.VAR_DROP_B_ADAP
        if mode == "mc": type = bl.ModelType.MC_DROP
        elif mode == "vd": type = bl.ModelType.VAR_DROP_B_ADAP
        elif mode == "bbb": type = bl.ModelType.BBB
        else: type = bl.ModelType.DET

        linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": type,
            "dropout": 0
        }
        self.data_type = data_type

        #self.norm = nn.InstanceNorm1d(input_size)
        #Embedding

        model = models.resnet18(pretrained=True)
        features = list(model.children())

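        # The pretrained first conv is inflated along the time axis (4 frames);
        # two inflated copies are kept, presumably one per stream (left / right).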
        conv = features[0]
        self.features = nn.Sequential(*features[1:-1])
        self.conv_left = inflate_conv(conv,
                                      time_dim=4,
                                      time_padding=0,
                                      time_stride=1,
                                      time_dilation=1,
                                      center=False)

        self.conv_right = inflate_conv(conv,
                                       time_dim=4,
                                       time_padding=0,
                                       time_stride=1,
                                       time_dilation=1,
                                       center=False)
        # weights/class_weights assumed to mirror the other variants (not defined here).
        weights = [1. / 1.5, 1.0]
        class_weights = torch.FloatTensor(weights).cuda()
        self.loss_fn = nn.NLLLoss(
            weight=class_weights) if output_size == 2 else nn.NLLLoss()
        self.output_size = 15  # fixed to 15 classes; output_size only selects the loss
        self.hidden_dim = hidden_dim  #256
        self.n_layers = n_layers  #2
        self.sharpen = False
        self.lstm = bl.LSTM(input_size=512,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True,
                            **rnn_args)

        self.dropout = dropout
        self.fc = bl.Linear(self.hidden_dim, self.output_size,
                            **last_linear_args)
        self.dp_training = True
        self._baysian_layers = []
    def __init__(self,
                 output_size,
                 hidden_dim,
                 n_layers,
                 mode="mc",
                 dropout=0.5,
                 logstd1=-1,
                 logstd2=-2,
                 pi=0.5):
        super(IterStarRGBModel, self).__init__()
        #type = bl.ModelType.VAR_DROP_B_ADAP
        if mode == "mc": self.type = bl.ModelType.MC_DROP
        elif mode == "vd": self.type = bl.ModelType.VAR_DROP_B_ADAP
        elif mode == "bbb": self.type = bl.ModelType.BBB
        else: self.type = bl.ModelType.DET

        linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": self.type,
            "dropout": dropout
        }
        rnn_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": self.type,
            "dropout": dropout
        }
        last_linear_args = {
            "mu": 0,
            "logstd1": logstd1,
            "logstd2": logstd2,
            "pi": pi,
            "type": self.type,
            "dropout": 0
        }

        resnet = models.resnet50(pretrained=True)
        self.resnet = nn.Sequential(*(list(resnet.children())[:-1]))
        #self.norm = nn.InstanceNorm1d(input_size)
        #Embedding
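        # The strided Conv1d below reduces the 2048-d ResNet-50 feature to
        # 1023 dims, matching self.embeding_size.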
        self.conv1d = nn.Conv1d(1, 1, 3, stride=2)
        # nn.MaxPool1d(2, stride=2)
        self.embeding_size = 1023
        weights = [1. / 1.5, 1.0]

        class_weights = torch.FloatTensor(weights).cuda()
        self.loss_fn = nn.NLLLoss(
            weight=class_weights) if output_size == 2 else nn.NLLLoss()
        self.output_size = output_size  #12
        self.hidden_dim = hidden_dim  #256
        self.n_layers = n_layers  #2
        self.sharpen = False
        self.lstm = bl.LSTM(input_size=self.embeding_size,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True,
                            **rnn_args)

        self.dropout = dropout
        # linear and sigmoid layers
        self.fc = bl.Linear(self.hidden_dim, self.output_size,
                            **last_linear_args)
        self.dp_training = True
        self._baysian_layers = []
        self.hidden = None
        # self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = torch.device('cpu')