class Pool(Layer):
    def __init__(self, pool_shape, mode='max'):
        self.mode = mode
        self.pool_h, self.pool_w = pool_shape
        self.pool_ob = Pooling()

    def forward_propagation(self, input):
        print("Forward Propagation : Pooling")
        self.last_input_shape = input.shape  # save for backprop

        # positions that hold the maximum element of each pooling window
        self.last_maxpositions = numpy.empty(self.output_shape(input.shape) + (2,), dtype=int)

        poolout, self.last_maxpositions = self.pool_ob.pool(
            input, self.pool_h, self.pool_w, self.last_maxpositions, self.mode)
        return poolout

    def backward_propagation(self, output_grad):
        print("Backward Propagation : Pooling")
        # route the gradient back to the max positions saved in the forward pass
        input_grad = self.pool_ob.pool_backprop(
            output_grad, self.last_input_shape, self.last_maxpositions)
        return input_grad

    def output_shape(self, input_shape):
        shape = (input_shape[0],
                 input_shape[1],
                 input_shape[2]//self.pool_h,
                 input_shape[3]//self.pool_w)
        return shape
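
The output_shape method above just divides the spatial dimensions by the pool size. For reference, a standalone sketch of the same 2x2 max pooling on an NCHW batch using plain NumPy (independent of the Pooling helper this layer delegates to):

import numpy

x = numpy.arange(2 * 3 * 4 * 4, dtype=float).reshape(2, 3, 4, 4)
pool_h = pool_w = 2
n, c, h, w = x.shape
# expose each pooling window on its own pair of axes, then reduce over them
out = x.reshape(n, c, h // pool_h, pool_h, w // pool_w, pool_w).max(axis=(3, 5))
print(out.shape)  # (2, 3, 2, 2) -- matches Pool.output_shape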
Example #2
    def __init__(
            self,
            input_dim=(1, 28, 28),
            conv_param={
                'filter_num': 30,
                'filter_size': 5,
                'pad': 0,
                'stride': 1
            },
            hidden_size=100,
            output_size=10,
            weight_init_std=0.01
    ):
        # Convolutional-layer hyperparameters
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]

        conv_output_size = (input_size - filter_size + 2 *
                            filter_pad) / filter_stride + 1
        pool_output_size = int(
            filter_num * (conv_output_size / 2) * (conv_output_size / 2)
        )
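        # Worked example with the defaults above (illustrative arithmetic):
        #   conv_output_size = (28 - 5 + 2*0) / 1 + 1 = 24
        #   after the 2x2 / stride-2 pooling below: 24 / 2 = 12
        #   pool_output_size = 30 * 12 * 12 = 4320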

        # Weight parameters
        self.params = {}
        self.params['W1'] = weight_init_std * \
            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)

        self.params['W2'] = weight_init_std * \
            np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)

        self.params['W3'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(
            self.params['W1'],
            self.params['b1'],
            conv_param['stride'],
            conv_param['pad']
        )
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)

        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()

        self.layers['Affine3'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = Softmax()
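
    # A forward pass with this layout simply chains the layers in registration
    # order; a minimal sketch of a predict method in the same style (assuming
    # each of the Convolution/Relu/Pooling/Affine classes above exposes a
    # forward() method, as their use here implies):
    def predict(self, x):
        # apply each layer in the order it was inserted into the OrderedDict
        for layer in self.layers.values():
            x = layer.forward(x)
        return x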
Example #3
import argparse
import os
import sys

import torch

print(os.getcwd())

path1 = os.path.join(os.path.dirname(os.getcwd()), 'trajnet/base/scripts/lstm')
path2 = os.path.join(os.path.dirname(os.getcwd()), 'trajnet/base/scripts')
sys.path.append(path1)
sys.path.append(path2)
sys.path.append(os.getcwd())

from lstm import LSTM, LSTMPredictor  #, drop_distant
from pooling import Pooling, HiddenStateMLPPooling
VERSION = '0.1.0'

# create model
pool = Pooling(type_='directional', hidden_dim=128, cell_side=2)
traj_model = LSTM(pool=pool, embedding_dim=64, hidden_dim=128)

traj_model.load_state_dict(
    torch.load(os.path.join(os.getcwd(),
                            "directional.pkl.state"))['state_dict'])
traj_model.eval()

for param in traj_model.parameters():
    param.requires_grad = False
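
# Note: with every parameter frozen above, the model is inference-only. An
# equivalent call-time guard is torch.no_grad() (hedged sketch; 'observed'
# stands in for whatever observation tensor the predictor consumes):
#
#     with torch.no_grad():
#         prediction = traj_model(observed)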


def main():
    parser = argparse.ArgumentParser('Parse configuration file')
    parser.add_argument('--env_config', type=str, default='configs/env.config')
    parser.add_argument('--policy', type=str, default='cadrl')
Example #4
    def __init__(self, n_batch, ratio, n_v, n_rings, n_dirs,
                 inputs,
                 bin_contributors,
                 weights,
                 transport,
                 parents,
                 angular_shifts,
                 batch_norm=False,
                 nstacks=1,
                 nresblocks_per_stack=2,
                 nfilters=16,
                 sync_mode='radial_sync',
                 additional_inputs=None,
                 name=''):

        """ResNet Version 1 Model builder [a]
        Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
        Last ReLU is after the shortcut connection.
        At the beginning of each stage, the feature map size is halved (downsampled)
        by a convolutional layer with strides=2, while the number of filters is
        doubled. Within each stage, the layers have the same number filters and the
        same number of filters.
        Features maps sizes:
        stage 0: 32x32, 16
        stage 1: 16x16, 32
        stage 2:  8x8,  64
        The Number of parameters is approx the same as Table 6 of [a]:
        ResNet20 0.27M
        ResNet32 0.46M
        ResNet44 0.66M
        ResNet56 0.85M
        ResNet110 1.7M
        # Arguments
            input_shape (tensor): shape of input image tensor
            depth (int): number of core convolutional layers
            num_classes (int): number of classes (CIFAR10 has 10)
        # Returns
            model (Model): Keras model instance
        """
        if name != '':
            name = name + '_'

        bn = batch_norm
        pool_ = True

        take_max = False
        if sync_mode == 'async':
            take_max = True

        if n_v is None:
            n_v = [None for _ in range(len(n_dirs))]

        # if (depth - 2) % 6 != 0:
        #    raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
        # Start model definition.
        num_filters = nfilters
        self.nfilters = []
        # num_res_blocks = int((depth - 2) / 6)
        self.num_res_blocks = nresblocks_per_stack
        self.additional_inputs = additional_inputs
        self.nstacks = nstacks
        self.inputs_names = [name + 'input_signal']

        x = inputs

        C = bin_contributors
        W = weights
        TA = transport
        P = parents
        AS = angular_shifts
        NV = []
        for i in range(nstacks-1):
            self.inputs_names.append(name + 'parents_' + int_to_string(i))
            self.inputs_names.append(name + 'angular_shifts_' + int_to_string(i))
        for i in range(nstacks):
            self.inputs_names.append(name + 'contributors_' + int_to_string(i))
            self.inputs_names.append(name + 'weights_' + int_to_string(i))
            self.inputs_names.append(name + 'transport_' + int_to_string(i))
            NV.append(Shape(axis=1)(C[i]))



        """
        inputs = Input(shape=(n_v[0], input_dim), batch_shape=(n_batch,) + (n_v[0], input_dim),
                       name=name + 'input_signal')
        x = inputs

        # patch operator


        
        C = []
        W = []
        TA = []

        P = []
        AS = []
        
        
        for i in range(nstacks - 1):
            # pool_op = PoolingOperatorFixed(parents=parents[i], angular_shifts=angular_shifts[i], batch_size=n_batch)

            self.inputs_names.append(name + 'parents_' + int_to_string(i))
            P.append(Input(shape=(n_v[i + 1],),
                           batch_shape=(n_batch,) + (n_v[i + 1],),
                           dtype='int32',
                           name=name + 'parents_' + int_to_string(i)))
            self.inputs_names.append(name + 'angular_shifts_' + int_to_string(i))
            AS.append(Input(shape=(n_v[i + 1],),
                            batch_shape=(n_batch,) + (n_v[i + 1],),
                            dtype='float32',
                            name=name + 'angular_shifts_' + int_to_string(i)))

        for stack in range(nstacks):
            # patch_op = PatchOperatorFixed(contributors=contributors[stack],
            #                              weights=weights[stack],
            #                             angles=angles[stack])
            patch_op_shape = (n_v[stack], n_rings[stack], n_dirs[stack], 3)
            self.inputs_names.append(name + 'contributors_' + int_to_string(stack))
            C.append(Input(shape=patch_op_shape, batch_shape=(n_batch,) + patch_op_shape, dtype='int32',
                           name=name + 'contributors_' + int_to_string(stack)))
            self.inputs_names.append(name + 'weights_' + int_to_string(stack))
            W.append(Input(shape=patch_op_shape, batch_shape=(n_batch,) + patch_op_shape, dtype='float32',
                           name=name + 'weights_' + int_to_string(stack)))
            self.inputs_names.append(name + 'transport_' + int_to_string(stack))
            TA.append(Input(shape=patch_op_shape, batch_shape=(n_batch,) + patch_op_shape, dtype='float32',
                            name=name + 'transport_' + int_to_string(stack)))

        """
        stack_ = 0

        if num_filters is None:
            num_filters = K.int_shape(x)[-1]

        if K.int_shape(x)[-1] != num_filters:
            x = gcnn_resnet_layer(inputs=x,
                                  contributors=C[stack_],
                                  weights=W[stack_],
                                  angles=TA[stack_],
                                  n_v=n_v[0],
                                  n_rings=n_rings[0],
                                  n_dirs=n_dirs[0],
                                  num_filters=num_filters,
                                  sync_mode=sync_mode,
                                  batch_normalization=bn,
                                  take_max=take_max)

        self.stacks = []

        # Instantiate the stack of residual units
        for stack in range(nstacks):
            for res_block in range(self.num_res_blocks):
                if stack > 0 and res_block == 0:  # first layer but not first stack
                    # pooling
                    # num_filters = 2*num_filters
                    if pool_:
                        # num_filters = int(np.sqrt((1. * n_v[stack - 1]) / (1. * n_v[stack])) * num_filters) + 1
                        # num_filters = int(np.sqrt(ratio[stack-1] / ratio[stack] + 0.0001)*num_filters)
                        # num_filters = int(np.sqrt(ratio[stack-1] / ratio[stack] + 0.0001)*K.int_shape(x)[-1])
                        stack_ = stack
                        if ratio[stack-1] > ratio[stack]:
                            x = Pooling()([x, P[stack-1], AS[stack-1]])
                        else:
                            x = TransposedPooling(new_nv=n_v[stack],
                                                  new_ndirs=n_dirs[stack])([x, P[stack - 1],
                                                                            AS[stack - 1],
                                                                            NV[stack]])
                            """
                            if n_v[stack] is None:
                                
                                key = 'stack_' + int_to_string(stack)
                                if key in self.additional_inputs:
                                    new_nv = K.shape(self.additional_inputs[key])[1]
                                else:
                                    raise ValueError('number of vertices could not be inferred')
                                
                                
                            else:
                                new_nv = n_v[stack]
                                x = TransposedPooling(new_nv=new_nv,
                                                      new_ndirs=n_dirs[stack])([x, P[stack - 1], AS[stack - 1]])
                            """
                    else:
                        num_filters *= 2
                        stack_ = 0

                    if self.additional_inputs is not None:
                        key = 'stack_' + int_to_string(stack)
                        if key in self.additional_inputs:
                            x = Concatenate(axis=-1)([x, self.additional_inputs[key]])

                    num_filters = int(np.sqrt(ratio[stack - 1] / ratio[stack] + 0.0001) * K.int_shape(x)[-1])

                y = gcnn_resnet_layer(inputs=x,
                                      contributors=C[stack_],
                                      weights=W[stack_],
                                      angles=TA[stack_],
                                      n_v=n_v[stack_],
                                      n_rings=n_rings[stack_],
                                      n_dirs=n_dirs[stack_],
                                      num_filters=num_filters,
                                      sync_mode=sync_mode,
                                      batch_normalization=bn,
                                      take_max=take_max)
                y = gcnn_resnet_layer(inputs=y,
                                      contributors=C[stack_],
                                      weights=W[stack_],
                                      angles=TA[stack_],
                                      n_v=n_v[stack_],
                                      n_rings=n_rings[stack_],
                                      n_dirs=n_dirs[stack_],
                                      num_filters=num_filters,
                                      sync_mode=sync_mode,
                                      batch_normalization=bn,
                                      take_max=take_max,
                                      activation=None)

                if stack > 0 and res_block == 0:  # first layer but not first stack
                    # linear projection residual shortcut connection to match
                    # changed dims
                    x = Dense(units=num_filters, use_bias=False, activation=None)(x)

                    # x = Dropout(0.25)(x)

                x = keras.layers.add([x, y])

                if res_block == self.num_res_blocks-1:
                    # save stack
                    self.stacks.append(x)

                x = Activation('relu')(x)


            # if stack > 0:
            #    num_filters = int(np.sqrt((1. * n_v[stack - 1]) / (1. * n_v[stack])) * num_filters)

        """
        # Add classifier on top.
        # v1 does not use BN after last shortcut connection-ReLU
        x = AngularMaxPooling(r=1, take_max=True)(x)
        # x = AngularAveragePooling(r=1, take_average=True)(x)
        x = GlobalAveragePooling1D()(x)
        y = Dense(num_classes,
                  kernel_initializer='he_normal',
                  name='final_vote')(x)
        outputs = Activation('softmax')(y)
        """

        # Instantiate model.
        self.output_dim = num_filters
        self.output = x
        self.input = inputs
        self.inputs_list = [self.input]

        for i in range(len(C)):
            self.inputs_list.append(C[i])
            self.inputs_list.append(W[i])
            self.inputs_list.append(TA[i])
        for i in range(len(P)):
            self.inputs_list.append(P[i])
            self.inputs_list.append(AS[i])
        self.inputs_dict = dict(zip(self.inputs_names, self.inputs_list))
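
        # The builder exposes tensors rather than a compiled model; a hedged
        # sketch of wrapping them with the standard Keras functional API:
        #   model = keras.models.Model(inputs=self.inputs_list, outputs=self.output)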