def __init__(self, inputs, bs, max_time, classes, feature_dim, hidden_size,
             method='max', seed=12345):
    self._inputs = inputs
    self.method = method          # feature pooling method ('max' by default)
    self.batch_size = bs
    self.classes = classes
    self.max_time = max_time
    self.feature_dim = feature_dim
    self.dropout = True
    # pooled features -> hidden layer -> softmax over all classes
    self.hidden = HiddenLayer(input_size=feature_dim, hidden_size=hidden_size,
                              batch_size=bs, name='hidden', dropout=0.5,
                              activation=act.LeakyRelu())
    self.softmax = SoftmaxLayer(input_size=hidden_size, classes=self.classes,
                                batch_size=bs, name='softmax', dropout=0.5)
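# A minimal sketch of how this baseline presumably composes (hedged: `fprop` and
# `pool` are hypothetical names; only the layer wiring above is taken from this file):
#
#   feats = pool(self._inputs, method=self.method)   # (bs, feature_dim), e.g. max over time
#   h = self.hidden.fprop(feats)                     # (bs, hidden_size)
#   p = self.softmax.fprop(h)                        # (bs, classes) class probabilities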
def __init__(self, inputs, bs, max_time, class_num, feature_dim, hidden_size,
             method='max', seed=12345):
    self._inputs = inputs
    self.method = method
    self.name = 'baseline_' + str(class_num)
    self.batch_size = bs
    self.class_num = theano.shared(class_num)  # index of the class this binary model detects
    self.max_time = max_time
    self.feature_dim = feature_dim
    self.dropout = True
    # pooled features -> hidden layer -> single sigmoid unit (binary decision)
    self.hidden = HiddenLayer(input_size=feature_dim, hidden_size=hidden_size,
                              batch_size=bs, name='hidden', dropout=0.5,
                              activation=act.LeakyRelu())
    self.classify = HiddenLayer(input_size=hidden_size, hidden_size=1,
                                batch_size=bs, name='classify', dropout=0.0,
                                activation=act.sigmoid)
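# Sketch of the presumed one-vs-rest setup implied by `class_num` and the single
# sigmoid output (the enclosing class name `BaselineModel` is hypothetical):
#
#   detectors = [BaselineModel(inputs, bs, max_time, c, feature_dim, hidden_size)
#                for c in range(num_classes)]
#   # predict by taking the argmax over the per-class sigmoid scores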
def __init__(self, inputs, bs, max_time, class_num, feature_dim, hidden_size,
             levels, N=1, pool=None, seed=12345):
    self._inputs = inputs
    self.N = N
    self.batch_size = bs
    self.name = 'learned_' + str(class_num)
    self.class_num = theano.shared(class_num)
    self.max_time = max_time
    self.filters = levels          # number of learned attention filters
    self.feature_dim = feature_dim
    self.pool = pool
    self.dropout = True
    # a bank of temporal attention filters whose parameters are learned
    self.temporal_pyramid = []
    for f in range(self.filters):
        tf = TemporalAttentionLayer(batch_size=bs, N=N, channels=feature_dim,
                                    name='af-' + str(f))
        self.temporal_pyramid.append(tf)
    # filter outputs are concatenated along the feature axis
    input_size = feature_dim * len(self.temporal_pyramid)  # *N
    self.hidden = HiddenLayer(input_size=input_size, hidden_size=hidden_size,
                              activation=act.LeakyRelu(), batch_size=bs,
                              name='hidden', dropout=0.5)
    self.classify = HiddenLayer(input_size=hidden_size, hidden_size=1,
                                batch_size=bs, name='classify', dropout=0.0,
                                activation=act.sigmoid)
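# Unlike the fixed pyramid below, every TemporalAttentionLayer here keeps its
# centre/stride/width as trainable parameters, so the model learns where to look
# in time. Shape sketch (an assumption, consistent with `input_size` above, which
# multiplies by the filter count but not by N; the call syntax is hypothetical):
#
#   parts = [tf(self._inputs)                 # (bs, feature_dim, T) -> (bs, feature_dim)
#            for tf in self.temporal_pyramid]
#   features = T.concatenate(parts, axis=1)   # (bs, feature_dim * self.filters)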
def __init__(self, inputs, bs, max_time, classes, feature_dim, hidden_size,
             levels, N=1, pool=None, seed=12345):
    self._inputs = inputs
    self.N = N
    self.batch_size = bs
    self.classes = classes
    self.max_time = max_time
    self.levels = levels
    self.feature_dim = feature_dim
    self.pool = pool
    self.dropout = True
    # create a pyramid of filters: level l holds 2**l filters with fixed
    # (non-learned) stride d, centre g, and width sigma
    self.temporal_pyramid = []
    for l in range(self.levels):
        for f in range(2**l):
            tf = TemporalAttentionLayer(batch_size=bs, N=N, channels=feature_dim,
                                        name='temporal-attention-layer-' + str(l) +
                                             '-filter-' + str(f))
            tf.test = True  # presumably pins the layer to the fixed parameters below
            tf.d = theano.shared(value=np.asarray([1. / 2**(l + 1)]).astype('float32'),
                                 name='d', borrow=True, broadcastable=[True])
            tf.g = theano.shared(value=np.asarray([(1. / 2**l) + (2. * f / 2.**l)]).astype('float32'),
                                 name='g', borrow=True, broadcastable=[True])
            tf.sigma = theano.shared(value=np.asarray([5.0]).astype('float32'),
                                     name='sigma', borrow=True, broadcastable=[True])
            self.temporal_pyramid.append(tf)
    # without pooling, the N outputs of every filter are concatenated;
    # with pooling, the filters are reduced to a single feature_dim*N vector
    input_size = feature_dim * N * (len(self.temporal_pyramid) if pool is None else 1)
    self.hidden = HiddenLayer(input_size=input_size, hidden_size=hidden_size,
                              activation=act.LeakyRelu(), batch_size=bs,
                              name='hidden', dropout=0.5)
    self.softmax = SoftmaxLayer(input_size=hidden_size, classes=self.classes,
                                batch_size=bs, name='softmax', dropout=0.5)
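# Sanity check of the pyramid geometry (a sketch; TemporalAttentionLayer's
# coordinate convention is assumed, not confirmed): level l tiles the time axis
# with 2**l filters of stride d = 1/2**(l+1) centred at g = 1/2**l + 2f/2**l,
# halving each filter's support at every deeper level:
#
#   for l in range(3):
#       print(l, [(1. / 2**(l + 1), (1. / 2**l) + (2. * f / 2.**l)) for f in range(2**l)])
#   # l=0: d=0.500, g=1.0
#   # l=1: d=0.250, g=0.5, 1.5
#   # l=2: d=0.125, g=0.25, 0.75, 1.25, 1.75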