def __init__(self, attr):
    Attr.__init__(self, attr)
    self._className = attr['Type'].rsplit(None, 1)[-1]
    self._backRefAttr = None  # init'ed in awakeFromRead()
    if self.get('Min') is not None:
        self['Min'] = int(self['Min'])
    if self.get('Max') is not None:
        self['Max'] = int(self['Max'])
def __init__(self, dict):
    Attr.__init__(self, dict)
    self._className = dict['Type'].split()[-1]
    self._backRefAttr = None  # init'ed in awakeFromRead()
    if self.get('Min') is not None:
        self['Min'] = int(self['Min'])
    if self.get('Max') is not None:
        self['Max'] = int(self['Max'])
def __init__(self, dict):
    Attr.__init__(self, dict)
    # We expect that an 'Enums' key holds the enumeration values
    enums = self['Enums']
    enums = enums.split(',')
    enums = [enum.strip() for enum in enums]
    self._enums = enums
    # Build a membership lookup (renamed from "set" to avoid shadowing the built-in)
    enumSet = {}
    for enum in self._enums:
        enumSet[enum] = 1
    self._enumSet = enumSet
def build(self):
    self.attr_net = Attr()
    self.speed_lstm = SpeedLSTM()
    self.road_lstm = RoadLSTM()
    self.bi_lstm = nn.LSTM(
        input_size=self.attr_net.out_size() + 64,
        hidden_size=64,
        num_layers=2,
        batch_first=True,
        bidirectional=True,
        dropout=0.25,
    )
    self.lnhiddens = nn.LayerNorm(self.attr_net.out_size() + 64, elementwise_affine=True)
import copy

import torch
import torch.nn as nn


class PredictionBiLSTM(nn.Module):
    def __init__(self):
        super(PredictionBiLSTM, self).__init__()
        self.build()

    def build(self):
        self.attr_net = Attr()
        self.speed_lstm = SpeedLSTM()
        self.road_lstm = RoadLSTM()
        self.bi_lstm = nn.LSTM(
            input_size=self.attr_net.out_size() + 64,
            hidden_size=64,
            num_layers=2,
            batch_first=True,
            bidirectional=True,
            dropout=0.25,
        )
        self.lnhiddens = nn.LayerNorm(self.attr_net.out_size() + 64, elementwise_affine=True)
        nn.init.uniform_(self.bi_lstm.state_dict()['weight_hh_l0'], a=-0.05, b=0.05)

    def forward(self, attr, traj):
        # Per-step speed and road embeddings, plus a per-trip attribute embedding
        speeds_t = self.speed_lstm(attr, traj)
        roads_t = self.road_lstm(attr, traj)
        attr_t = self.attr_net(attr)
        # Broadcast the attribute embedding along the time dimension and concatenate
        attr_t = torch.unsqueeze(attr_t, dim=1)
        expand_attr_t = attr_t.expand(roads_t.size()[:2] + (attr_t.size()[-1],))
        hiddens = torch.cat([expand_attr_t, speeds_t, roads_t], dim=2)
        hiddens = self.lnhiddens(hiddens)
        # Pack the padded batch by true sequence length, run the BiLSTM, then unpack
        lens = copy.deepcopy(traj['lens'])
        lens = list(lens)
        packed_inputs = nn.utils.rnn.pack_padded_sequence(hiddens, lens, batch_first=True)
        packed_hiddens, (h_n, c_n) = self.bi_lstm(packed_inputs)
        hiddens, lens = nn.utils.rnn.pad_packed_sequence(packed_hiddens, batch_first=True)
        return hiddens
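For context, below is a minimal, self-contained sketch of the pad/pack pattern that forward() relies on. The toy sizes (feature size 8, hidden size 4, batch of 3) are assumptions chosen for illustration, and Attr, SpeedLSTM, and RoadLSTM are not reproduced here.

import torch
import torch.nn as nn

# Sketch only: the dimensions below are assumed, not taken from the snippet above.
hiddens = torch.randn(3, 5, 8)  # 3 sequences padded to length 5, feature size 8
lens = [5, 4, 2]                # true lengths, sorted longest-first

ln = nn.LayerNorm(8, elementwise_affine=True)
bi_lstm = nn.LSTM(input_size=8, hidden_size=4, num_layers=2,
                  batch_first=True, bidirectional=True, dropout=0.25)

normed = ln(hiddens)
packed = nn.utils.rnn.pack_padded_sequence(normed, lens, batch_first=True)
packed_out, (h_n, c_n) = bi_lstm(packed)
out, out_lens = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
print(out.shape)  # torch.Size([3, 5, 8]): 2 directions * hidden_size 4

Packing before the LSTM keeps the padded timesteps out of the recurrence, so the hidden states returned for each sequence stop at its true length.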
def build(self):
    self.attr_net = Attr()
    self.speed_lstm = SpeedLSTM()
    self.road_lstm = RoadLSTM()
    self.bi_lstm = nn.LSTM(
        input_size=self.attr_net.out_size() + 64,
        hidden_size=64,
        num_layers=2,
        batch_first=True,
        bidirectional=True,
        dropout=0.25,
    )
    self.lnhiddens = nn.LayerNorm(self.attr_net.out_size() + 64, elementwise_affine=True)
    nn.init.uniform_(self.bi_lstm.state_dict()['weight_hh_l0'], a=-0.05, b=0.05)
def ParseValue(self, v, t):
    if t == 'int':
        return int(v)
    elif t == 'str':
        return str(v)
    elif t == 'attr':
        return Attr(str(v))
    elif t == 'reward':
        return Reward(str(v))
    elif t == 'listint':
        return ListInt(str(v))
    elif t == 'starrate':
        return StarRate(str(v))
    return 0
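A small standalone sketch of how the type-tag dispatch behaves on the built-in branches; Attr, Reward, ListInt, and StarRate are project-specific types from the snippet above and are deliberately not reimplemented here.

# Hypothetical helper covering only the 'int' and 'str' branches of ParseValue.
def parse_builtin(v, t):
    if t == 'int':
        return int(v)
    if t == 'str':
        return str(v)
    return 0  # unrecognized tags fall through to 0, as in ParseValue

assert parse_builtin('42', 'int') == 42
assert parse_builtin(42, 'str') == '42'
assert parse_builtin('x', 'unknown') == 0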
def getAttributeNode(self, name):
    return Attr(self.doc, self, name) if self.tag.has_attr(name) else None
def __init__(self, dict):
    Attr.__init__(self, dict)
def __init__(self, dict):
    Attr.__init__(self, dict)
    self._className = dict['Type']
def __init__(self, attr):
    Attr.__init__(self, attr)
    self._className = attr['Type']
def __init__(self, attr):
    Attr.__init__(self, attr)
def __init__(self, attr):
    Attr.__init__(self, attr)
    # We expect that an 'Enums' key holds the enumeration values
    self._enums = [enum.strip() for enum in self['Enums'].split(',')]
    self._enumSet = dict((enum, i) for i, enum in enumerate(self._enums))
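A brief usage sketch of the enum parsing above; the sample string 'red, green, blue' is an assumption chosen only to show the resulting list and the enum-to-index map.

# Assumed sample value for the 'Enums' key.
enums_value = 'red, green, blue'
enums = [enum.strip() for enum in enums_value.split(',')]
enum_set = dict((enum, i) for i, enum in enumerate(enums))
print(enums)     # ['red', 'green', 'blue']
print(enum_set)  # {'red': 0, 'green': 1, 'blue': 2}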
def __init__(self, dict):
    Attr.__init__(self, dict)
    self._className = dict['Type'].split()[-1]
def createAttribute(self, name):
    return Attr(self, None, name)