def attach(self, netspec, bottom, residual_branch):
    """Attach this block to ``netspec``.

    Wiring: a shared BN+ReLU pre-norm feeds both a convolution shortcut
    and a pooling -> temporal-conv -> sigmoid branch; an 'Axpxpy' layer
    then combines the sigmoid output, the shortcut, and the incoming
    ``residual_branch``.  Returns the 'Axpxpy' top.
    """
    # Shared pre-normalization for both paths.
    normed = BNReLUModule(name_template=self.name_template,
                          bn_params=self.bnParams,
                          sync_bn=self.sync_bn).attach(netspec, bottom)

    # Shortcut path: single convolution on the normalized input.
    shortcut = BaseModule('Convolution', self.conv3x1x1Params).attach(
        netspec, [normed])

    # Main path: spatial global pooling, temporal convolution, sigmoid.
    pooled = BaseModule('Pooling', self.poolingParams).attach(netspec, [normed])
    temporal = BaseModule('Convolution', self.t_convParams).attach(
        netspec, [pooled])
    gate = BaseModule('Sigmoid', self.sigmoidParams).attach(netspec, [temporal])

    # Combine (sigmoid, shortcut, residual) with the Axpxpy layer.
    return BaseModule('Axpxpy', self.addParams).attach(
        netspec, [gate, shortcut, residual_branch])
def attach(self, netspec, bottom):
    """Attach this block to ``netspec``.

    Wiring: BN+ReLU pre-norm feeds a 1x1x1-conv shortcut and a pooling ->
    temporal-conv -> reshape branch; a 'Bias' layer takes (shortcut,
    reshape) and is returned as the block output.
    """
    # Shared pre-normalization for both paths.
    normed = BNReLUModule(name_template=self.name_template,
                          bn_params=self.bnParams,
                          sync_bn=self.sync_bn).attach(netspec, bottom)

    # Shortcut path: 1x1x1 convolution on the normalized input.
    shortcut = BaseModule('Convolution', self.conv1x1x1Params).attach(
        netspec, [normed])

    # Main path: spatial global pooling, temporal convolution, reshape.
    pooled = BaseModule('Pooling', self.poolingParams).attach(netspec, [normed])
    temporal = BaseModule('Convolution', self.t_convParams).attach(
        netspec, [pooled])
    reshaped = BaseModule('Reshape', self.reshapeParams).attach(
        netspec, [temporal])

    # Bias layer fuses the two paths and is the block's top.
    return BaseModule('Bias', self.biasParams).attach(
        netspec, [shortcut, reshaped])
def __init__(self, db):
    """Load circle-table column names from common/conf.ini.

    db -- database handle, forwarded to BaseModule.__init__.
    """
    BaseModule.__init__(self, db)
    config = ConfigParser.ConfigParser()
    # Use a context manager so the config file handle is always closed;
    # the previous bare open() without close() leaked the descriptor.
    with open(AP + 'common/conf.ini') as conf_file:
        config.readfp(conf_file)
    self._circle_table = self.prefix + 'circle_table'
    self._c_id = config.get(self._circle_table, "c_id")
    self._umeng_cid = config.get(self._circle_table, "umeng_cid")
    self._umeng_virtual_cid = config.get(self._circle_table, "umeng_virtual_cid")
    self._icon_url = config.get(self._circle_table, "icon_url")
def __init__(self, db):
    """Load user_common_info column names from common/conf.ini.

    db -- database handle, forwarded to BaseModule.__init__.
    """
    BaseModule.__init__(self, db)
    config = ConfigParser.ConfigParser()
    # Use a context manager so the config file handle is always closed;
    # the previous bare open() without close() leaked the descriptor.
    with open(AP + 'common/conf.ini') as conf_file:
        config.readfp(conf_file)
    self._user_common_table = self.prefix + 'user_common_info'
    self._uid = config.get(self._user_common_table, "uid")
    self._admission_year = config.get(self._user_common_table, "admission_year")
    self._faculty = config.get(self._user_common_table, "faculty")
    self._major = config.get(self._user_common_table, "major")
    self._name = config.get(self._user_common_table, "name")
    self._gender = config.get(self._user_common_table, "gender")
    self._job = config.get(self._user_common_table, "job")
    self._icon_url = config.get(self._user_common_table, "icon_url")
    self._city = config.get(self._user_common_table, "city")
    self._state = config.get(self._user_common_table, "state")
    self._country = config.get(self._user_common_table, "country")
def __init__(self, db):
    """Load manual_review_table column names from common/conf.ini.

    db -- database handle, forwarded to BaseModule.__init__.
    """
    BaseModule.__init__(self, db)
    config = ConfigParser.ConfigParser()
    # Use a context manager so the config file handle is always closed;
    # the previous bare open() without close() leaked the descriptor.
    with open(AP + 'common/conf.ini') as conf_file:
        config.readfp(conf_file)
    self._manual_review_table = self.prefix + 'manual_review_table'
    self._review_id = config.get(self._manual_review_table, "review_id")
    self._circle_name = config.get(self._manual_review_table, "circle_name")
    self._circle_icon_url = config.get(self._manual_review_table, "circle_icon_url")
    self._creator_uid = config.get(self._manual_review_table, "creator_uid")
    self._circle_type_id = config.get(self._manual_review_table, "circle_type_id")
    self._reason_message = config.get(self._manual_review_table, "reason_message")
    self._result = config.get(self._manual_review_table, "result")
    self._description = config.get(self._manual_review_table, "description")
    self._circle_type_name = config.get(self._manual_review_table, "circle_type_name")
    self._creator_name = config.get(self._manual_review_table, "creator_name")
def attach(self, netspec, bottom):
    """Attach a normalization + ReLU chain followed by a 'Corrv1' layer.

    With ``uni_bn`` the norm is a single 'SyncBN'/'BN' layer (selected by
    ``sync_bn``); otherwise it is the classic 'BatchNorm' + 'Scale' pair.
    Returns the 'Corrv1' top.
    """
    if self.uni_bn:
        # Unified BN layer: sync or plain variant.
        bn_type = 'SyncBN' if self.sync_bn else 'BN'
        normed = BaseModule(bn_type, self.bnParams).attach(netspec, bottom)
        activated = BaseModule('ReLU', self.reluParams).attach(netspec, [normed])
    else:
        # Classic Caffe pair: BatchNorm followed by Scale.
        bn_out = BaseModule('BatchNorm', self.batchNormParams).attach(
            netspec, bottom)
        scaled = BaseModule('Scale', self.scaleParams).attach(netspec, [bn_out])
        activated = BaseModule('ReLU', self.reluParams).attach(netspec, [scaled])
    return BaseModule('Corrv1', self.corrParams).attach(netspec, [activated])
def write_prototxt(is_train, output_folder, \
                   filename, main_branch, \
                   num_output_stage1, \
                   blocks, sync_bn, uni_bn):
    """Generate a train or deploy prototxt for the video network.

    is_train          -- True: real video data layers + loss/accuracy tops;
                         False: a DummyData input only.
    output_folder     -- directory that receives the prototxt file.
    filename          -- name of the prototxt file to write.
    main_branch       -- main-branch type forwarded to the residual blocks.
    num_output_stage1 -- stage-1 channel count; doubled at each later stage.
    blocks            -- per-stage block counts for the 4 residual stages.
    sync_bn, uni_bn   -- batch-norm flavor flags forwarded to submodules.
    """
    netspec = caffe.NetSpec()

    #### Input Setting ####
    crop_size = 112
    width = 170
    height = 128
    length = 16
    step = 8
    num_segments = 1
    # BN statistics are updated during training, frozen at test time.
    use_global_stats = not is_train

    #### Data layer ####
    if is_train:
        data_train_params = dict(
            name='data',
            ntop=2,
            video4d_data_param=dict(
                source="../kinetics_train_list.txt",
                batch_size=32,
                new_width=width,
                new_height=height,
                new_length=length,
                num_segments=num_segments,
                modality=0,
                step=step,
                rand_step=True,
                name_pattern='image_%06d.jpg',
                shuffle=True),
            transform_param=dict(
                crop_size=crop_size,
                mirror=True,
                multi_scale=True,
                max_distort=1,
                scale_ratios=[1, 0.875, 0.75, 0.66],
                mean_value=[104]*length+[117]*length+[123]*length),
            include=dict(phase=0))
        data_val_params = dict(
            name='vdata',
            ntop=2,
            video4d_data_param=dict(
                source="../kinetics_val_list.txt",
                batch_size=1,
                new_width=width,
                new_height=height,
                new_length=length,
                num_segments=num_segments,
                modality=0,
                step=step,
                name_pattern='image_%06d.jpg'),
            transform_param=dict(
                crop_size=crop_size,
                mirror=False,
                mean_value=[104]*length+[117]*length+[123]*length),
            include=dict(phase=1))
        netspec.data, netspec.label = BaseModule(
            'Video4dData', data_train_params).attach(netspec, [])
        netspec.vdata, netspec.vlabel = BaseModule(
            'Video4dData', data_val_params).attach(netspec, [])
    else:
        data_params = dict(
            name='data',
            dummy_data_param=dict(
                shape=dict(dim=[10, 3, length, crop_size, crop_size])))
        netspec.data = BaseModule('DummyData', data_params).attach(netspec, [])

    #### (Optional) Reshape Layer ####
    if is_train:
        # Fold the segment axis into the batch axis for the conv stack.
        reshape_params = dict(
            name='data_reshape',
            reshape_param=dict(
                shape=dict(dim=[-1, 3, length, crop_size, crop_size])))
        netspec.data_reshape = BaseModule('Reshape', reshape_params).attach(
            netspec, [netspec.data])

    #### Stage 1 ####
    # Channel count sized from the decoupled 1xdxd / tx1x1 factorization.
    # Floor division keeps the result an int on Python 3 as well (the old
    # "/" relied on Python 2 integer division).
    channels = 3*7*7*3*64 // (7*7*3 + 3*64)
    conv1xdxd_params = dict(name='conv1_1x3x3',
                            num_output=channels,
                            kernel_size=[1, 7, 7],
                            pad=[0, 3, 3],
                            stride=[1, 2, 2],
                            engine=2)
    conv1_1xdxd = BaseModule('Convolution', conv1xdxd_params).attach(
        netspec, [netspec.data_reshape if is_train else netspec.data])
    convtx1x1_params = dict(name='conv1_3x1x1',
                            num_output=64,
                            kernel_size=[3, 1, 1],
                            pad=[1, 0, 0],
                            stride=[2, 1, 1],
                            engine=2)
    if uni_bn:
        bn_params = dict(frozen=False)
    else:
        bn_params = dict(use_global_stats=use_global_stats)
    stage1 = BNReLUConvModule(name_template='1',
                              bn_params=bn_params,
                              conv_params=convtx1x1_params,
                              sync_bn=sync_bn,
                              uni_bn=uni_bn).attach(netspec, [conv1_1xdxd])
    num_output = num_output_stage1

    #### Stages 2 - 5 ####
    last = stage1
    for stage in range(4):
        for block in range(blocks[stage]):
            # First block of every stage except the first projects and
            # downsamples; all other blocks use identity shortcuts.
            if block == 0 and stage != 0:
                shortcut = 'projection'
                stride = 2
            else:
                shortcut = 'identity'
                stride = 1
            name = str(stage + 2) + num2letter[int(block)]
            curr_num_output = num_output * (2 ** stage)
            last = PreActWiderDecoupBlock(name_template=name, \
                                          shortcut=shortcut, \
                                          num_output=curr_num_output, \
                                          stride=stride, \
                                          sync_bn=sync_bn, \
                                          uni_bn=uni_bn).attach(netspec, [last])
            # Correlation-attention blocks after the second block of
            # stages 2 and 3.
            if stage == 0 and block == 1:
                name = 'stage2_atten'
                last = CorrAttentionBlock(name_template=name,
                                          template_type=1,
                                          num_output=curr_num_output,
                                          kernel_size=1,
                                          max_displacement=7,
                                          pad=7).attach(netspec, [last])
            if stage == 1 and block == 1:
                name = 'stage3_atten'
                print(name)
                last = CorrAttentionBlock(name_template=name,
                                          template_type=1,
                                          num_output=curr_num_output,
                                          kernel_size=1,
                                          max_displacement=5,
                                          pad=5).attach(netspec, [last])

    #### Last Norm & ReLU ####
    if uni_bn:
        bn_params = dict(frozen=False)
    else:
        bn_params = dict(use_global_stats=use_global_stats)
    last = BNReLUModule(name_template='5b', \
                        bn_params=bn_params, \
                        sync_bn=sync_bn, \
                        uni_bn=uni_bn).attach(netspec, [last])

    #### pool5 ####
    pool_params = dict(global_pooling=True, pool=P.Pooling.AVE, name='pool5')
    pool = BaseModule('Pooling', pool_params).attach(netspec, [last])

    #### pool5_reshape ####
    reshape_params = dict(shape=dict(dim=[-1, num_output_stage1 * 8]),
                          name='pool5_reshape')
    reshape = BaseModule('Reshape', reshape_params).attach(netspec, [pool])

    #### dropout ####
    dropout_params = dict(dropout_ratio=0.2, name='dropout')
    dropout = BaseModule('Dropout', dropout_params).attach(netspec, [reshape])

    #### ip ####
    ip_params = dict(name='fc400', num_output=400)
    ip = BaseModule('InnerProduct', ip_params).attach(netspec, [dropout])

    if is_train:
        #### Softmax Loss ####
        smax_params = dict(name='loss')
        BaseModule('SoftmaxWithLoss', smax_params).attach(
            netspec, [ip, netspec.label])
        #### Top1 Accuracy ####
        top1_params = dict(name='top1', accuracy_param=dict(top_k=1),
                           include=dict(phase=1))
        BaseModule('Accuracy', top1_params).attach(netspec, [ip, netspec.label])
        #### Top5 Accuracy ####
        top5_params = dict(name='top5', accuracy_param=dict(top_k=5),
                           include=dict(phase=1))
        BaseModule('Accuracy', top5_params).attach(netspec, [ip, netspec.label])

    filepath = os.path.join(output_folder, filename)
    # Context manager + write() replaces the Python-2-only "print >> fp"
    # and guarantees the file is closed even if serialization raises.
    with open(filepath, 'w') as fp:
        fp.write('{}\n'.format(netspec.to_proto()))
def __init__(self, db):
    """Initialize the message module: extend the table-name prefix and
    parse common/conf.ini.

    db -- database handle, forwarded to BaseModule.__init__.
    """
    BaseModule.__init__(self, db)
    self.prefix = self.prefix + 'message_'
    config = ConfigParser.ConfigParser()
    # Use a context manager so the config file handle is always closed;
    # the previous bare open() without close() leaked the descriptor.
    with open(AP + 'common/conf.ini') as conf_file:
        config.readfp(conf_file)
def attach(self, netspec, bottom):
    """Build a two-level spatial-pyramid temporal module.

    Pipeline: BN+ReLU pre-norm -> tx1x1 trunk conv; two pyramid branches
    pool the pre-normed input spatially (stride 2 and stride 4), run a
    tx1x1 conv at half the trunk's channel count, and are Interp-resized
    back to the trunk's resolution; trunk and both branches are then
    concatenated and fused by a BN-ReLU-conv module.

    Returns (br2a_tx1x1, out): the trunk conv top and the fusion top.
    """
    #### BNReLU + tx1x1 convA ####
    name = self.name_template
    prenorm = BNReLUModule(name_template=name, \
                           bn_params=self.bn_params, \
                           sync_bn=self.sync_bn, \
                           uni_bn=self.uni_bn).attach(netspec, bottom)
    convtx1x1_params = dict(name='conv_' + name, \
                            num_output=self.num_output, \
                            kernel_size=[3, 1, 1], \
                            pad=[1, 0, 0], \
                            stride=[self.stride, 1, 1], \
                            engine=2)
    br2a_tx1x1 = BaseModule('Convolution', convtx1x1_params).attach(
        netspec, [prenorm])

    #### pyramid_1 ####
    name = self.name_template + '_p1'
    pool_params = dict(name='pool_' + name, kernel_size=[1, 3, 3],
                       pad=[0, 1, 1], stride=[1, 2, 2], pool=0)
    pool1 = BaseModule('Pooling', pool_params).attach(netspec, [prenorm])
    # Floor division keeps num_output an int under Python 3 too; the old
    # "/" relied on Python 2 integer division and would hand Caffe a
    # float channel count on Python 3.
    convtx1x1_params = dict(name='conv_' + name, \
                            num_output=self.num_output // 2, \
                            kernel_size=[3, 1, 1], \
                            pad=[1, 0, 0], \
                            stride=[1, 1, 1], \
                            engine=2)
    br2a_tx1x1_p1 = BaseModule('Convolution', convtx1x1_params).attach(
        netspec, [pool1])
    interp_params = dict(name='interp_' + name)
    interp_p1 = BaseModule('Interp', interp_params).attach(
        netspec, [br2a_tx1x1_p1, br2a_tx1x1])

    #### pyramid_2 ####
    name = self.name_template + '_p2'
    pool_params = dict(name='pool_' + name, kernel_size=[1, 3, 3],
                       pad=[0, 1, 1], stride=[1, 4, 4], pool=0)
    pool2 = BaseModule('Pooling', pool_params).attach(netspec, [prenorm])
    convtx1x1_params = dict(name='conv_' + name, \
                            num_output=self.num_output // 2, \
                            kernel_size=[3, 1, 1], \
                            pad=[1, 0, 0], \
                            stride=[1, 1, 1], \
                            engine=2)
    br2a_tx1x1_p2 = BaseModule('Convolution', convtx1x1_params).attach(
        netspec, [pool2])
    interp_params = dict(name='interp_' + name)
    interp_p2 = BaseModule('Interp', interp_params).attach(
        netspec, [br2a_tx1x1_p2, br2a_tx1x1])

    #### pyramid_extreme ####
    # Not Implemented

    #### concat ####
    name = self.name_template + '_concat'
    concat_params = dict(name=name)
    concat = BaseModule('Concat', concat_params).attach(
        netspec, [br2a_tx1x1, interp_p1, interp_p2])

    #### fusion conv ####
    name = self.name_template + '_fusion'
    convtx1x1_params = dict(name='conv_' + name, \
                            num_output=self.num_output, \
                            kernel_size=[3, 1, 1], \
                            pad=[1, 0, 0], \
                            stride=[1, 1, 1], \
                            engine=2)
    out = BNReLUConvModule(name_template=name,
                           bn_params=self.bn_params,
                           conv_params=convtx1x1_params).attach(
        netspec, [concat])
    return br2a_tx1x1, out
def attach(self, netspec, bottom, res=None):
    """Attach a pre-normed tx1x1 trunk plus two cascaded pyramid levels,
    merged by a weighted Eltwise sum.

    If ``res`` is None the sum is (trunk, p1, p2) with coeff [1, 0.5, 0.5];
    otherwise ``res`` is a (name, blob) pair that is prepended with
    coeff [1, 1, 0.5, 0.5].  Returns the Eltwise top.
    """
    base_name = self.name_template

    #### BNReLU + tx1x1 trunk conv ####
    normed = BNReLUModule(name_template=base_name,
                          bn_params=self.bn_params,
                          sync_bn=self.sync_bn,
                          uni_bn=self.uni_bn).attach(netspec, bottom)
    trunk = BaseModule('Convolution',
                       dict(name='conv_' + base_name,
                            num_output=self.num_output,
                            kernel_size=[3, 1, 1],
                            pad=[1, 0, 0],
                            stride=[self.stride, 1, 1],
                            engine=2)).attach(netspec, [normed])

    def pyramid_level(suffix, source):
        # One pyramid level: spatial pool -> tx1x1 conv -> Interp back to
        # the trunk's resolution.  Returns (conv_top, interp_top).
        level = base_name + suffix
        pooled = BaseModule('Pooling',
                            dict(name='pool_' + level,
                                 kernel_size=[1, 3, 3],
                                 pad=[0, 1, 1],
                                 stride=[1, 2, 2],
                                 pool=0)).attach(netspec, [source])
        conv = BaseModule('Convolution',
                          dict(name='conv_' + level,
                               num_output=self.num_output,
                               kernel_size=[3, 1, 1],
                               pad=[1, 0, 0],
                               stride=[1, 1, 1],
                               engine=2)).attach(netspec, [pooled])
        resized = BaseModule('Interp',
                             dict(name='interp_' + level)).attach(
            netspec, [conv, trunk])
        return conv, resized

    # Level 1 pools the trunk; level 2 cascades off level 1's conv.
    p1_conv, p1_up = pyramid_level('_p1', trunk)
    _, p2_up = pyramid_level('_p2', p1_conv)

    #### pyramid_extreme ####
    ## Not Added Yet

    #### weighted eltwise add ####
    if res is None:
        fuse_params = dict(name=base_name + '_add', operation=1,
                           coeff=[1, 0.5, 0.5])
        fuse_inputs = [trunk, p1_up, p2_up]
    else:
        fuse_params = dict(name='eltadd_' + res[0], operation=1,
                           coeff=[1, 1, 0.5, 0.5])
        fuse_inputs = [res[1], trunk, p1_up, p2_up]
    return BaseModule('Eltwise', fuse_params).attach(netspec, fuse_inputs)