x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
input_T = Reshape((3, 3))(x)
in_0 = MatMul()([input_points, input_T])

# forward net0
f_0 = Conv1D(64, 1, activation='relu')(in_0)
f_0 = BatchNormalization()(f_0)
f_0 = Conv1D(64, 1, activation='relu')(f_0)
c_0 = Conv1D(32, 1, activation='relu')(in_0)
c_0 = BatchNormalization()(c_0)
c_0 = Conv1D(32, 1, activation='relu')(c_0)
out_0 = GumbelSoftmax(1, hard=True)([f_0, c_0])

global_feature = MaxPooling1D(pool_size=2048)(c_0)
c = Dense(512, activation='relu')(global_feature)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(256, activation='relu')(c)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(40, activation='softmax')(c)
prediction = Flatten()(c)
'''
model = Model(inputs=input_points, outputs=[out_0, prediction])
xx = np.random.rand(32, 2048, 3) - 0.5
y = model.predict_on_batch(xx)
'''
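
# --- Hedged sketch (not part of the original model code): MatMul above is a custom
# --- layer from this project; it is assumed to perform a batched matrix product
# --- between the point cloud (B, N, 3) and the learned 3x3 transform, as in
# --- PointNet's input T-Net. A minimal stand-in could look like the following;
# --- the class name MatMulSketch is hypothetical and the real layer may differ.
import tensorflow as tf
from keras.layers import Layer

class MatMulSketch(Layer):
    """Batched matrix multiply: (B, N, 3) x (B, 3, 3) -> (B, N, 3)."""
    def call(self, inputs):
        a, b = inputs
        return tf.matmul(a, b)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], input_shape[1][2])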
x = BatchNormalization()(x)
x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
input_T = Reshape((3, 3))(x)
in_0 = MatMul()([input_points, input_T])

# forward net0
f_0 = Conv1D(64, 1, activation='relu')(in_0)
f_0 = Conv1D(64, 1, activation='relu')(f_0)
c_0 = Conv1D(512, 1, activation='relu')(in_0)
c_0 = Conv1D(512, 1, activation='relu')(c_0)
out_0 = GumbelSoftmax(1, hard=False)([f_0, c_0])

# forward net1
f_1 = Conv1D(128, 1, activation='relu')(out_0)
f_1 = Conv1D(128, 1, activation='relu')(f_1)
c_1 = Conv1D(128, 1, activation='relu')(out_0)
c_1 = Conv1D(128, 1, activation='relu')(c_1)
out_1 = GumbelSoftmax(1, hard=False)([f_1, c_1])

# forward net2
f_2 = Conv1D(256, 1, activation='relu')(out_1)
f_2 = Conv1D(256, 1, activation='relu')(f_2)
c_2 = Conv1D(32, 1, activation='relu')(out_1)
c_2 = Conv1D(32, 1, activation='relu')(c_2)
out_2 = GumbelSoftmax(1, hard=False)([f_2, c_2])
x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
input_T = Reshape((3, 3))(x)
in_0 = MatMul()([input_points, input_T])

# forward net0
f_0 = Conv1D(64, 1, activation='relu')(in_0)
f_0 = BatchNormalization()(f_0)
f_0 = Conv1D(64, 1, activation='relu')(f_0)
c_0 = Conv1D(32, 1, activation='relu')(in_0)
c_0 = BatchNormalization()(c_0)
c_0 = Conv1D(32, 1, activation='relu')(c_0)
out_0 = GumbelSoftmax(nb_batch=32, temperature=1, hard=True)([f_0, c_0])

'''
global_feature = MaxPooling1D(pool_size=32)(out_0)
c = Dense(512, activation='relu')(global_feature)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(256, activation='relu')(c)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(40, activation='softmax')(c)
prediction = Flatten()(c)
'''

model = Model(inputs=input_points, outputs=[out_0])
xx = np.random.rand(32, 2048, 3) - 0.5
y = model.predict_on_batch(xx)
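
# --- Hedged sketch (illustrative only): GumbelSoftmax is a custom layer whose
# --- signature varies across the variants in this file (positional temperature,
# --- nb_batch=..., single input or an [f, c] pair). What it presumably implements
# --- is the standard Gumbel-Softmax reparameterisation, shown here on a plain
# --- logits tensor; with hard=True a straight-through one-hot estimate is used.
# --- Function name and exact TF calls are assumptions, not the project's API.
import tensorflow as tf

def gumbel_softmax_sample(logits, temperature=1.0, hard=False, eps=1e-20):
    """Draw a differentiable categorical sample from unnormalised logits."""
    u = tf.random.uniform(tf.shape(logits), minval=0.0, maxval=1.0)
    gumbel = -tf.math.log(-tf.math.log(u + eps) + eps)      # Gumbel(0, 1) noise
    y = tf.nn.softmax((logits + gumbel) / temperature)       # soft, differentiable sample
    if hard:
        # straight-through estimator: forward pass is one-hot, gradients flow through y
        y_hard = tf.one_hot(tf.argmax(y, axis=-1), tf.shape(logits)[-1])
        y = tf.stop_gradient(y_hard - y) + y
    return y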
x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
input_T = Reshape((3, 3))(x)
in_0 = MatMul()([input_points, input_T])

# forward net0
f_0 = Conv1D(64, 1, activation='relu')(in_0)
f_0 = BatchNormalization()(f_0)
f_0 = Conv1D(64, 1, activation='relu')(f_0)
c_0 = Conv1D(256, 1, activation='relu')(in_0)
c_0 = BatchNormalization()(c_0)
c_0 = Conv1D(256, 1, activation='relu')(c_0)
i_0 = GumbelSoftmax(temperature=1, hard=True)(c_0)
for _ in range(2):
    i_0_t = GumbelSoftmax(temperature=1, hard=True)(c_0)
    i_0 = GumbelIntegration('max')([i_0, i_0_t])
out_0 = GumbelPooling(pool_way='max')([f_0, i_0])

'''
global_feature = MaxPooling1D(pool_size=256)(out_0)
c = Dense(512, activation='relu')(global_feature)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(256, activation='relu')(c)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(40, activation='softmax')(c)
prediction = Flatten()(c)
'''
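
# --- Hedged sketch (illustrative only): GumbelIntegration('max') above appears to
# --- merge several independent hard Gumbel-Softmax draws into a single selection
# --- mask. Assuming it simply takes the elementwise maximum of its two inputs
# --- (i.e. the union of one-hot selections), a minimal stand-in would be the
# --- helper below; the name gumbel_integration_max is hypothetical.
from keras.layers import Maximum

def gumbel_integration_max(sample_a, sample_b):
    """Union of two hard selection masks via elementwise max (assumed behaviour)."""
    return Maximum()([sample_a, sample_b])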
x = BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
input_T = Reshape((3, 3))(x)
in_0 = MatMul()([input_points, input_T])

# forward net0
f_0 = Conv1D(64, 1, activation='relu')(in_0)
f_0 = BatchNormalization()(f_0)
f_0 = Conv1D(64, 1, activation='relu')(f_0)
c_0 = Conv1D(256, 1, activation='relu')(in_0)
c_0 = BatchNormalization()(c_0)
c_0 = Conv1D(256, 1, activation='relu')(c_0)
c_0 = GumbelSoftmax(temperature=0.5, hard=True)(c_0)
out_0 = GumbelPooling(pool_way='max')([f_0, c_0])

'''
global_feature = MaxPooling1D(pool_size=256)(out_0)
c = Dense(512, activation='relu')(global_feature)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(256, activation='relu')(c)
c = BatchNormalization()(c)
c = Dropout(0.5)(c)
c = Dense(40, activation='softmax')(c)
prediction = Flatten()(c)
'''
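
# --- Hedged sketch (illustrative only): GumbelPooling(pool_way=...) above takes
# --- per-point features f (B, N, C) and a Gumbel-Softmax assignment a (B, N, K)
# --- and appears to pool the features of the points selected by each of the K
# --- one-hot columns, giving (B, K, C); that reading is consistent with the
# --- MaxPooling1D(pool_size=256) applied to out_0 when c_0 has 256 channels.
# --- One plausible 'max' version, assuming a hard assignment; the helper name
# --- gumbel_pooling_max is hypothetical.
import tensorflow as tf
from keras.layers import Lambda

def gumbel_pooling_max(f, a):
    """Pool features per group: (B, N, C), (B, N, K) -> (B, K, C) (assumed semantics)."""
    def _pool(t):
        feats, assign = t
        # mask each point's features by its group membership, then max over the N points
        masked = tf.expand_dims(tf.transpose(assign, [0, 2, 1]), -1) \
                 * tf.expand_dims(feats, 1)          # (B, K, N, C)
        return tf.reduce_max(masked, axis=2)         # (B, K, C)
    return Lambda(_pool)([f, a])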
f = Dense(256, activation='relu')(f)
f = BatchNormalization()(f)
f = Dense(64 * 64, weights=[np.zeros([256, 64 * 64]), np.eye(64).flatten().astype(np.float32)])(f)
feature_T = Reshape((64, 64))(f)
in_0 = MatMul()([g, feature_T])

# forward net0
f_0 = Conv1D(128, 1, activation='relu')(in_0)
f_0 = BatchNormalization()(f_0)
c_0 = Conv1D(64, 1, activation='relu')(in_0)
c_0 = BatchNormalization()(c_0)
c_0 = Activation('relu')(c_0)
# c_0 = GumbelSoftmax(temperature=0.1, hard=False)(c_0)
i_0 = GumbelSoftmax(temperature=0.1, hard=True)(c_0)
for _ in range(0):
    i_0_t = GumbelSoftmax(temperature=0.1, hard=True)(c_0)
    i_0 = GumbelIntegration('max')([i_0, i_0_t])
out_0 = GumbelPooling(pool_way='mean')([f_0, i_0])

# forward net1
f_1 = Conv1D(256, 1, activation='relu')(out_0)
f_1 = BatchNormalization()(f_1)
c_1 = Conv1D(16, 1, activation='relu')(out_0)
c_1 = BatchNormalization()(c_1)
c_1 = Activation('relu')(c_1)
# c_1 = GumbelSoftmax(temperature=0.1, hard=False)(c_1)
i_1 = GumbelSoftmax(temperature=0.1, hard=True)(c_1)
for _ in range(0):