# Exemplo n.º 1 (score: 0)
def _get_135_CostVolume_(inputs):
    """Build a disparity cost volume from the views on the 135-degree diagonal.

    For each candidate disparity d in [-4, 4], every input view is shifted by
    a disparity-proportional offset (bilinear interpolation) and the shifted
    views are concatenated along the channel axis; the nine per-disparity
    costs are then stacked and reshaped into a single 5-D volume.
    """
    shape = K.shape(inputs[0])
    disparity_costs = []
    for d in range(-4, 5):
        if d == 0:
            # Zero disparity: the views need no shifting.
            shifted = list(inputs)
        else:
            shifted = []
            for i, view in enumerate(inputs):
                # Map the flat index onto 9x9 grid coordinates, then onto the
                # 135-degree diagonal: for i < 9 this yields (v, u) = (i, 8 - i).
                v, u = divmod(i, 9)
                v += i
                u = 8 - u
                shifted.append(
                    tf.contrib.image.translate(
                        view, [d * (u - 4), d * (v - 4)], 'BILINEAR'))
        disparity_costs.append(K.concatenate(shifted, axis=3))
    cost_volume = K.stack(disparity_costs, axis=1)
    # NOTE(review): the 4 * 9 channel count assumes 9 views of 4 channels
    # each — confirm against the caller.
    return K.reshape(cost_volume,
                     (shape[0], 9, shape[1], shape[2], 4 * 9))
# Exemplo n.º 2 (score: 0)
def atari_qnet(input_shape, num_actions, net_name, net_size):
    """Build a (possibly dueling / recurrent) Q-network for Atari frames.

    Args:
        input_shape: Shape of the input state tensor (excluding batch).
        num_actions: Number of discrete actions (size of the Q-value output).
        net_name: Case-insensitive architecture name. Must contain 'dqn' or
            'drqn'; may also contain 'lstm'/'gru' (recurrent head) and
            'dueling' (dueling value/advantage streams).
        net_size: Width of the hidden (Dense/LSTM/GRU) layer.

    Returns:
        A Keras `Model` mapping `state` to per-action Q-values.

    Raises:
        ValueError: If `net_name` names neither a 'dqn' nor a 'drqn' network.
    """
    net_name = net_name.lower()

    # input state
    state = Input(shape=input_shape)

    # convolutional layers (shared definitions for both branches)
    conv1_32 = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')
    conv2_64 = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')
    conv3_64 = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')

    # NOTE: 'drqn' must be tested before 'dqn' since 'drqn' contains 'dqn'.
    if 'drqn' in net_name:
        # recurrent net (drqn): treat the stacked frames (last axis) as a
        # time dimension and add a trailing channel axis of size 1
        lambda_perm_state = lambda x: K.permute_dimensions(x, [0, 3, 1, 2])
        perm_state = Lambda(lambda_perm_state)(state)
        dist_state = Lambda(lambda x: K.stack([x], axis=4))(perm_state)

        # extract features with `TimeDistributed` wrapped convolutional layers
        dist_conv1 = TimeDistributed(conv1_32)(dist_state)
        dist_conv2 = TimeDistributed(conv2_64)(dist_conv1)
        dist_convf = TimeDistributed(conv3_64)(dist_conv2)
        feature = TimeDistributed(Flatten())(dist_convf)
    elif 'dqn' in net_name:
        # fully connected net (dqn)
        # extract features with convolutional layers
        conv1 = conv1_32(state)
        conv2 = conv2_64(conv1)
        convf = conv3_64(conv2)
        feature = Flatten()(convf)
    else:
        # Fail fast with a clear message instead of hitting an
        # UnboundLocalError on `feature` further down.
        raise ValueError(
            "net_name must contain 'dqn' or 'drqn', got {!r}".format(net_name))

    # network type. Dense for dqn; LSTM or GRU for drqn
    if 'lstm' in net_name:
        net_type = LSTM
    elif 'gru' in net_name:
        net_type = GRU
    else:
        net_type = Dense

    # dueling or regular dqn/drqn
    if 'dueling' in net_name:
        # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), assembled with
        # broadcasting via a constant ones matrix of shape (1, num_actions)
        value1 = net_type(net_size, activation='relu')(feature)
        adv1 = net_type(net_size, activation='relu')(feature)
        value2 = Dense(1)(value1)
        adv2 = Dense(num_actions)(adv1)
        mean_adv2 = Lambda(lambda x: K.mean(x, axis=1))(adv2)
        ones = K.ones([1, num_actions])
        lambda_exp = lambda x: K.dot(K.expand_dims(x, axis=1), -ones)
        exp_mean_adv2 = Lambda(lambda_exp)(mean_adv2)
        sum_adv = add([exp_mean_adv2, adv2])
        exp_value2 = Lambda(lambda x: K.dot(x, ones))(value2)
        q_value = add([exp_value2, sum_adv])
    else:
        hid = net_type(net_size, activation='relu')(feature)
        q_value = Dense(num_actions)(hid)

    # build model
    return Model(inputs=state, outputs=q_value)
# Exemplo n.º 3 (score: 0)
def atari_acnet(input_shape, num_actions, net_name, net_size):
    """Build an actor-critic network for Atari frames.

    Args:
        input_shape: Shape of the input state tensor (excluding batch).
        num_actions: Number of discrete actions (size of the policy logits).
        net_name: Case-insensitive architecture name. Must contain 'lstm',
            'gru' (recurrent feature head) or 'fully connected'.
        net_size: Width of the hidden (Dense/LSTM/GRU) layer.

    Returns:
        A Keras `Model` mapping `state` to `[value, logits]`.

    Raises:
        ValueError: If `net_name` names none of the supported architectures.
    """
    net_name = net_name.lower()

    # input state
    state = Input(shape=input_shape)

    # convolutional layers (shared definitions for both branches)
    conv1_32 = Conv2D(32, (8, 8), strides=(4, 4), activation='relu')
    conv2_64 = Conv2D(64, (4, 4), strides=(2, 2), activation='relu')
    conv3_64 = Conv2D(64, (3, 3), strides=(1, 1), activation='relu')

    # if recurrent net then change input shape
    if 'lstm' in net_name or 'gru' in net_name:
        # recurrent net: treat the stacked frames (last axis) as a time
        # dimension and add a trailing channel axis of size 1
        lambda_perm_state = lambda x: K.permute_dimensions(x, [0, 3, 1, 2])
        perm_state = Lambda(lambda_perm_state)(state)
        dist_state = Lambda(lambda x: K.stack([x], axis=4))(perm_state)

        # extract features with `TimeDistributed` wrapped convolutional layers
        dist_conv1 = TimeDistributed(conv1_32)(dist_state)
        dist_conv2 = TimeDistributed(conv2_64)(dist_conv1)
        dist_convf = TimeDistributed(conv3_64)(dist_conv2)
        feature = TimeDistributed(Flatten())(dist_convf)

        # specify net type for the following layer
        if 'lstm' in net_name:
            net_type = LSTM
        else:
            net_type = GRU
    elif 'fully connected' in net_name:
        # fully connected net
        # extract features with convolutional layers
        conv1 = conv1_32(state)
        conv2 = conv2_64(conv1)
        convf = conv3_64(conv2)
        feature = Flatten()(convf)

        # specify net type for the following layer
        net_type = Dense
    else:
        # Fail fast with a clear message instead of hitting an
        # UnboundLocalError on `feature`/`net_type` further down.
        raise ValueError(
            "net_name must contain 'lstm', 'gru' or 'fully connected', "
            "got {!r}".format(net_name))

    # actor (policy) and critic (value) stream
    hid = net_type(net_size, activation='relu')(feature)
    logits = Dense(num_actions, kernel_initializer='zeros')(hid)
    value = Dense(1)(hid)

    # build model
    return Model(inputs=state, outputs=[value, logits])
# Exemplo n.º 4 (score: 0)
def dilated_bn_feature_net_gather_61x61(input_shape=(2, 1080, 1280),
                                        training_examples=1e5,
                                        batch_size=None,
                                        n_features=3,
                                        reg=1e-5,
                                        init='he_normal',
                                        weights_path=None,
                                        permute=False):
    """Build a dilated, batch-normalized 61x61 feature net with index gather.

    Stacks three conv/conv/pool groups (doubling the dilation rate after each
    pool), a 200-filter conv, and two `TensorProd2D` layers, then gathers the
    per-pixel feature vectors at the (batch, row, col) positions supplied by
    three auxiliary index inputs via `tf.gather_nd`.

    Args:
        input_shape: Shape of the image input (channels-first here, given the
            `BatchNormalization(axis=1)` calls).
        training_examples: Number of gather indices; used as the batch shape
            of the three index inputs.
        batch_size: Forwarded to the first Conv2D only.
            NOTE(review): `batch_size` is not a standard Conv2D argument —
            confirm the project's Keras version accepts it.
        n_features: Number of output feature channels from the final
            `TensorProd2D`.
        reg: L2 regularization strength for all conv/tensorprod kernels.
        init: Kernel initializer name for all conv/tensorprod layers.
        weights_path: Unused in this function body.
        permute: Unused in this function body.
            NOTE(review): both `weights_path` and `permute` are accepted but
            never read — possibly kept for signature parity with sibling
            model builders.

    Returns:
        A Keras `Model` taking `[image, batch_idx, row_idx, col_idx]` and
        returning the gathered per-index feature vectors.
    """
    print("Using dilated feature net 61x61 with batch normalization")

    input1 = Input(shape=input_shape)

    # Dilation rate starts at 1 and doubles after every pooling stage.
    d = 1
    conv1 = Conv2D(64, (3, 3),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   batch_size=batch_size,
                   kernel_regularizer=l2(reg))(input1)
    norm1 = BatchNormalization(axis=1)(conv1)
    act1 = Activation('relu')(norm1)

    conv2 = Conv2D(64, (4, 4),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(act1)
    norm2 = BatchNormalization(axis=1)(conv2)
    act2 = Activation('relu')(norm2)
    pool1 = dilated_MaxPool2D(dilation_rate=d, pool_size=(2, 2))(act2)
    d *= 2

    # Second conv/conv/pool group at dilation 2.
    conv3 = Conv2D(64, (3, 3),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(pool1)
    norm3 = BatchNormalization(axis=1)(conv3)
    act3 = Activation('relu')(norm3)

    conv4 = Conv2D(64, (3, 3),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(act3)
    norm4 = BatchNormalization(axis=1)(conv4)
    act4 = Activation('relu')(norm4)
    pool2 = dilated_MaxPool2D(dilation_rate=d, pool_size=(2, 2))(act4)
    d *= 2

    # Third conv/conv/pool group at dilation 4.
    conv5 = Conv2D(64, (3, 3),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(pool2)
    norm5 = BatchNormalization(axis=1)(conv5)
    act5 = Activation('relu')(norm5)

    conv6 = Conv2D(64, (3, 3),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(act5)
    norm6 = BatchNormalization(axis=1)(conv6)
    act6 = Activation('relu')(norm6)
    pool3 = dilated_MaxPool2D(dilation_rate=d, pool_size=(2, 2))(act6)
    d *= 2

    # Wide conv head at dilation 8.
    conv7 = Conv2D(200, (4, 4),
                   dilation_rate=d,
                   kernel_initializer=init,
                   padding='valid',
                   kernel_regularizer=l2(reg))(pool3)
    norm7 = BatchNormalization(axis=1)(conv7)
    act7 = Activation('relu')(norm7)

    # 1x1-style feature mixing via the project's TensorProd2D layer.
    tensorprod1 = TensorProd2D(200,
                               200,
                               kernel_initializer=init,
                               kernel_regularizer=l2(reg))(act7)
    norm8 = BatchNormalization(axis=1)(tensorprod1)
    act8 = Activation('relu')(norm8)

    tensorprod2 = TensorProd2D(200,
                               n_features,
                               kernel_initializer=init,
                               kernel_regularizer=l2(reg))(act8)
    # axis_softmax is a project helper; presumably a softmax over the
    # channel axis — confirm in its definition.
    act9 = Activation(axis_softmax)(tensorprod2)

    # Move channels last so gather_nd indexes (batch, row, col) positions
    # and returns per-pixel feature vectors.
    permute1 = Permute((2, 3, 1))(act9)

    # One flat index vector per coordinate; batch shape is the number of
    # training examples to gather.
    batch_index_input = Input(batch_shape=(training_examples, ), dtype='int32')
    row_index_input = Input(batch_shape=(training_examples, ), dtype='int32')
    col_index_input = Input(batch_shape=(training_examples, ), dtype='int32')

    # Combine into an (examples, 3) index tensor for gather_nd.
    index1 = K.stack([batch_index_input, row_index_input, col_index_input],
                     axis=1)

    def gather_indices(x):
        # Closes over index1 built above; returns x[b, r, c] for each row.
        return tf.gather_nd(x, index1)

    gather1 = Lambda(gather_indices)(permute1)

    model = Model(
        inputs=[input1, batch_index_input, row_index_input, col_index_input],
        outputs=[gather1])

    print(model.output_shape)

    return model