def __init__(self, input_dim, hidden_dim, action_dim, num_objects,
             ignore_action=False, copy_action=False, act_fn='relu'):
    super(TransitionGNN, self).__init__()

    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.num_objects = num_objects
    self.ignore_action = ignore_action
    self.copy_action = copy_action

    if self.ignore_action:
        self.action_dim = 0
    else:
        self.action_dim = action_dim

    # Edge model: operates on concatenated (source, target) node embeddings.
    self.edge_mlp = nn.Sequential(
        nn.Linear(input_dim * 2, hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim),
        nn.LayerNorm(hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim))

    # Node model: consumes aggregated edge messages, the node state, and the action.
    node_input_dim = hidden_dim + input_dim + self.action_dim

    self.node_mlp = nn.Sequential(
        nn.Linear(node_input_dim, hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim),
        nn.LayerNorm(hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, input_dim))

    self.edge_list = None
    self.batch_size = 0
def __init__(self, input_dim, hidden_dim, num_objects, output_size,
             act_fn='relu'):
    super(DecoderCNNMedium, self).__init__()

    width, height = output_size[1] // 5, output_size[2] // 5
    output_dim = width * height

    self.fc1 = nn.Linear(input_dim, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, hidden_dim)
    self.fc3 = nn.Linear(hidden_dim, output_dim)
    self.ln = nn.LayerNorm(hidden_dim)

    # deconv1 upsamples each object map by 5x; deconv2 (9x9, padding 4)
    # preserves the spatial size while mapping to the output channels.
    self.deconv1 = nn.ConvTranspose2d(num_objects, hidden_dim,
                                      kernel_size=5, stride=5)
    self.deconv2 = nn.ConvTranspose2d(hidden_dim, output_size[0],
                                      kernel_size=9, padding=4)
    self.ln1 = nn.BatchNorm2d(hidden_dim)  # note: a BatchNorm despite the name

    self.input_dim = input_dim
    self.num_objects = num_objects
    self.map_size = output_size[0], width, height

    self.act1 = utils.get_act_fn(act_fn)
    self.act2 = utils.get_act_fn(act_fn)
    self.act3 = utils.get_act_fn(act_fn)
def __init__(self, input_dim, hidden_dim, num_objects, output_size,
             act_fn='relu'):
    super(DecoderCNNSmall_test, self).__init__()

    width, height = output_size[1] // 10, output_size[2] // 10
    output_dim = width * height

    self.fc1 = nn.Linear(input_dim, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, hidden_dim)
    self.fc3 = nn.Linear(hidden_dim, output_dim)
    self.ln = nn.LayerNorm(hidden_dim)

    # deconv1 (1x1) mixes object maps into hidden channels; deconv2 upsamples
    # each spatial cell by 10x back to the full output resolution.
    self.deconv1 = nn.ConvTranspose2d(num_objects, hidden_dim,
                                      kernel_size=1, stride=1)
    self.deconv2 = nn.ConvTranspose2d(hidden_dim, output_size[0],
                                      kernel_size=10, stride=10)

    self.input_dim = input_dim
    self.num_objects = num_objects
    self.map_size = output_size[0], width, height

    self.act1 = utils.get_act_fn(act_fn)
    self.act2 = utils.get_act_fn(act_fn)
    self.act3 = utils.get_act_fn(act_fn)
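# Illustrative shape check (a sketch, not part of the original file): a
# stride-10 ConvTranspose2d inverts the matching 10x10/stride-10 encoder
# conv below, mapping a 5x5 object map back to a 50x50 image plane.
import torch
import torch.nn as nn

_deconv = nn.ConvTranspose2d(32, 3, kernel_size=10, stride=10)
_z = torch.randn(1, 32, 5, 5)              # (batch, hidden_dim, width, height)
assert _deconv(_z).shape == (1, 3, 50, 50)  # (in - 1) * stride + kernel = 50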
def __init__(self, input_dim, hidden_dim, num_objects, act_fn='sigmoid',
             act_fn_hid='relu'):
    super(EncoderCNNSmall, self).__init__()
    # The 10x10 kernel with stride 10 downsamples by 10x; the 1x1 conv then
    # maps hidden channels to one feature map per object.
    self.cnn1 = nn.Conv2d(input_dim, hidden_dim, (10, 10), stride=10)
    self.cnn2 = nn.Conv2d(hidden_dim, num_objects, (1, 1), stride=1)
    self.ln1 = nn.BatchNorm2d(hidden_dim)
    self.act1 = utils.get_act_fn(act_fn_hid)
    self.act2 = utils.get_act_fn(act_fn)
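# Quick sanity check (illustrative; a 50x50 input resolution is assumed):
# the stride-10 conv yields a 5x5 grid per hidden channel, and the 1x1 conv
# produces one 5x5 feature map per object slot.
import torch
import torch.nn as nn

_enc1 = nn.Conv2d(3, 32, (10, 10), stride=10)
_enc2 = nn.Conv2d(32, 5, (1, 1), stride=1)
_obs = torch.rand(1, 3, 50, 50)
assert _enc2(_enc1(_obs)).shape == (1, 5, 5, 5)  # (batch, num_objects, 5, 5)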
def __init__(self, input_dim, hidden_dim, action_dim, num_objects,
             ignore_action=False, copy_action=False, act_fn='relu',
             immovable_bit=False, split_gnn=False,
             factored_continuous_action=False):
    super(TransitionGNN, self).__init__()

    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.num_objects = num_objects
    self.ignore_action = ignore_action
    self.copy_action = copy_action
    self.immovable_bit = immovable_bit
    self.split_gnn = split_gnn
    self.factored_continuous_action = factored_continuous_action

    # Optionally append one extra feature flagging immovable objects.
    if self.immovable_bit:
        self.input_dim += 1

    if self.ignore_action:
        self.action_dim = 0
    else:
        self.action_dim = action_dim

    self.edge_mlp = nn.Sequential(
        nn.Linear(self.input_dim * 2, hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim),
        nn.LayerNorm(hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim))

    # With a single object there are no edges, so no edge messages are fed
    # into the node model.
    if num_objects > 1:
        node_input_dim = hidden_dim + self.input_dim + self.action_dim
    else:
        node_input_dim = self.input_dim + self.action_dim

    self.node_mlp = nn.Sequential(
        nn.Linear(node_input_dim, hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim),
        nn.LayerNorm(hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, self.input_dim))

    if self.split_gnn:
        # Keep separate copies of the edge/node models (cp aliases the `copy`
        # module) and drop the shared versions below.
        self.edge_mlp1 = self.edge_mlp
        self.edge_mlp2 = cp.deepcopy(self.edge_mlp1)
        self.edge_mlp3 = cp.deepcopy(self.edge_mlp1)
        self.node_mlp1 = self.node_mlp
        self.node_mlp2 = cp.deepcopy(self.node_mlp1)
        del self.node_mlp
        del self.edge_mlp

    self.edge_list = None
    self.batch_size = 0
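# Sketch (an assumption, following the usual fully-connected-graph convention)
# of how an edge list over num_objects nodes can be built for the edge MLP:
# every ordered pair (i, j) with i != j becomes one directed edge.
import itertools
import torch

_num_objects = 3
_edges = [(i, j) for i, j in itertools.permutations(range(_num_objects), 2)]
_edge_index = torch.tensor(_edges, dtype=torch.long).t()  # shape (2, K*(K-1))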
def __init__(self, input_dim, hidden_dim, num_objects, act_fn='sigmoid',
             act_fn_hid='leaky_relu'):
    super(EncoderCNNMedium, self).__init__()
    # The 9x9 conv with padding 4 preserves the spatial size; the 5x5
    # stride-5 conv then downsamples by 5x into one map per object.
    self.cnn1 = nn.Conv2d(input_dim, hidden_dim, (9, 9), padding=4)
    self.act1 = utils.get_act_fn(act_fn_hid)
    self.ln1 = nn.BatchNorm2d(hidden_dim)

    self.cnn2 = nn.Conv2d(hidden_dim, num_objects, (5, 5), stride=5)
    self.act2 = utils.get_act_fn(act_fn)
def __init__(self, input_dim, output_dim, hidden_dim, act_fn='relu'):
    super(MLP, self).__init__()
    self.input_dim = input_dim

    self.fc1 = nn.Linear(self.input_dim, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, hidden_dim)
    self.fc3 = nn.Linear(hidden_dim, output_dim)
    self.ln = nn.LayerNorm(hidden_dim)

    self.act1 = utils.get_act_fn(act_fn)
    self.act2 = utils.get_act_fn(act_fn)
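# The __init__ alone leaves the wiring ambiguous; this is one plausible
# forward pass (an assumption, matching the LayerNorm placement used by the
# Sequential MLPs elsewhere in this file), not the original implementation.
def mlp_forward(m, x):
    h = m.act1(m.fc1(x))
    h = m.act2(m.ln(m.fc2(h)))  # norm after the second linear layer
    return m.fc3(h)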
def __init__(self, input_dim, hidden_dim, action_dim, act_fn='relu'):
    super(Inverse, self).__init__()
    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.action_dim = action_dim

    self.inverse_mlp = nn.Sequential(
        nn.Linear(input_dim, hidden_dim),
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, hidden_dim),
        # LayerNorm deliberately omitted: it degraded results for this head
        # (the reason is unclear).
        utils.get_act_fn(act_fn),
        nn.Linear(hidden_dim, action_dim))
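# Illustrative use of the inverse model above (the calling convention is an
# assumption): predict the action from concatenated consecutive state
# embeddings, so input_dim would be 2 * embedding_dim.
import torch

_inv = Inverse(input_dim=2 * 4, hidden_dim=128, action_dim=5)
_z_t, _z_t1 = torch.randn(8, 4), torch.randn(8, 4)
_action_logits = _inv.inverse_mlp(torch.cat([_z_t, _z_t1], dim=-1))  # (8, 5)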
def __init__(self, input_dim, hidden_dim, num_objects, output_size,
             act_fn='relu'):
    super(DecoderMLP, self).__init__()

    # fc1 takes the object embedding concatenated with a one-hot object id.
    self.fc1 = nn.Linear(input_dim + num_objects, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, hidden_dim)
    self.fc3 = nn.Linear(hidden_dim, np.prod(output_size))

    self.input_dim = input_dim
    self.num_objects = num_objects
    self.output_size = output_size

    self.act1 = utils.get_act_fn(act_fn)
    self.act2 = utils.get_act_fn(act_fn)
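# fc1 consumes input_dim + num_objects features, which suggests each object
# embedding is tagged with a one-hot object id before the MLP. A sketch of
# that assumed convention (not taken from the original forward):
import torch

_B, _K, _D = 8, 5, 2
_states = torch.randn(_B, _K, _D)
_ids = torch.eye(_K).unsqueeze(0).expand(_B, _K, _K)  # one-hot id per slot
_h = torch.cat([_states, _ids], dim=-1)               # (B, K, D + K) -> fc1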
def __init__(self, mconfig, name=None):
  super().__init__(name=name)
  self.endpoints = {}
  self._mconfig = mconfig

  self._conv_head = tf.keras.layers.Conv2D(
      filters=round_filters(mconfig.feature_size or 1280, mconfig),
      kernel_size=1,
      strides=1,
      kernel_initializer=conv_kernel_initializer,
      padding='same',
      data_format=mconfig.data_format,
      use_bias=False,
      name='conv2d')
  self._norm = utils.normalization(
      mconfig.bn_type,
      axis=(1 if mconfig.data_format == 'channels_first' else -1),
      momentum=mconfig.bn_momentum,
      epsilon=mconfig.bn_epsilon,
      groups=mconfig.gn_groups)
  self._act = utils.get_act_fn(mconfig.act_fn)

  self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
      data_format=mconfig.data_format)

  if mconfig.dropout_rate > 0:
    self._dropout = tf.keras.layers.Dropout(mconfig.dropout_rate)
  else:
    self._dropout = None

  self.h_axis, self.w_axis = (
      [2, 3] if mconfig.data_format == 'channels_first' else [1, 2])
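# Plausible head forward pass composed from these layers (an assumption; the
# real call() is defined elsewhere): conv -> norm -> act -> pool -> dropout.
def head_forward(head, inputs, training=False):
  x = head._act(head._norm(head._conv_head(inputs), training=training))
  x = head._avg_pooling(x)
  if head._dropout is not None:
    x = head._dropout(x, training=training)
  return x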
def __init__(self, block_args, mconfig, name=None):
  """Initializes an MBConv block.

  Args:
    block_args: BlockArgs, arguments to create a Block.
    mconfig: GlobalParams, a set of global parameters.
    name: layer name.
  """
  super().__init__(name=name)

  self._block_args = copy.deepcopy(block_args)
  self._mconfig = copy.deepcopy(mconfig)
  self._local_pooling = mconfig.local_pooling
  self._data_format = mconfig.data_format
  self._channel_axis = 1 if self._data_format == 'channels_first' else -1
  self._act = utils.get_act_fn(mconfig.act_fn)
  self._has_se = (
      self._block_args.se_ratio is not None and
      0 < self._block_args.se_ratio <= 1)
  self.endpoints = None

  # Builds the block according to the arguments.
  self._build()
def __init__(self, mconfig, se_filters, output_filters, name=None):
  super().__init__(name=name)
  self._local_pooling = mconfig.local_pooling
  self._data_format = mconfig.data_format
  self._act = utils.get_act_fn(mconfig.act_fn)

  # Squeeze and Excitation layers.
  self._se_reduce = tf.keras.layers.Conv2D(
      se_filters,
      kernel_size=1,
      strides=1,
      kernel_initializer=conv_kernel_initializer,
      padding='same',
      data_format=self._data_format,
      use_bias=True,
      name='conv2d')
  self._se_expand = tf.keras.layers.Conv2D(
      output_filters,
      kernel_size=1,
      strides=1,
      kernel_initializer=conv_kernel_initializer,
      padding='same',
      data_format=self._data_format,
      use_bias=True,
      name='conv2d_1')
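# Illustrative squeeze-and-excitation gating built from the two convs above
# (a sketch assuming channels_last layout; the block's real call() is
# defined elsewhere in the file).
import tensorflow as tf

def se_gate(se, inputs):
  s = tf.reduce_mean(inputs, axis=[1, 2], keepdims=True)  # squeeze: global pool
  s = se._se_expand(se._act(se._se_reduce(s)))            # excite: bottleneck
  return tf.sigmoid(s) * inputs                           # channel-wise gate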
def __init__(self, input_dim, hidden_dim, num_objects, act_fn='sigmoid',
             act_fn_hid='relu'):
    super(EncoderCNNLarge, self).__init__()
    # Four 3x3 convolutions with padding 1: the spatial size is preserved
    # throughout; only the channel count changes.
    self.cnn1 = nn.Conv2d(input_dim, hidden_dim, (3, 3), padding=1)
    self.act1 = utils.get_act_fn(act_fn_hid)
    self.ln1 = nn.BatchNorm2d(hidden_dim)

    self.cnn2 = nn.Conv2d(hidden_dim, hidden_dim, (3, 3), padding=1)
    self.act2 = utils.get_act_fn(act_fn_hid)
    self.ln2 = nn.BatchNorm2d(hidden_dim)

    self.cnn3 = nn.Conv2d(hidden_dim, hidden_dim, (3, 3), padding=1)
    self.act3 = utils.get_act_fn(act_fn_hid)
    self.ln3 = nn.BatchNorm2d(hidden_dim)

    self.cnn4 = nn.Conv2d(hidden_dim, num_objects, (3, 3), padding=1)
    self.act4 = utils.get_act_fn(act_fn)
def __init__(self, input_dim, output_dim, hidden_dim, num_objects,
             act_fn='relu'):
    super(EncoderMLP, self).__init__()
    self.num_objects = num_objects
    self.input_dim = input_dim

    self.fc1 = nn.Linear(self.input_dim, hidden_dim)
    self.fc2 = nn.Linear(hidden_dim, hidden_dim)
    self.fc3 = nn.Linear(hidden_dim, output_dim)
    self.ln = nn.LayerNorm(hidden_dim)

    self.act1 = utils.get_act_fn(act_fn)
    self.act2 = utils.get_act_fn(act_fn)
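# input_dim here is the flattened per-object feature size, so one plausible
# forward pass (an assumption, not the original implementation) flattens each
# object slot, applies the shared MLP, then restores the per-object axis.
def encoder_mlp_forward(enc, ins):
    h = ins.view(-1, enc.input_dim)                  # (B * K, input_dim)
    h = enc.act1(enc.fc1(h))
    h = enc.act2(enc.ln(enc.fc2(h)))
    return enc.fc3(h).view(ins.size(0), enc.num_objects, -1)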
def __init__(self, mconfig, stem_filters, name=None):
  super().__init__(name=name)
  self._conv_stem = tf.keras.layers.Conv2D(
      filters=round_filters(stem_filters, mconfig),
      kernel_size=3,
      strides=2,
      kernel_initializer=conv_kernel_initializer,
      padding='same',
      data_format=mconfig.data_format,
      use_bias=False,
      name='conv2d')
  self._norm = utils.normalization(
      mconfig.bn_type,
      axis=(1 if mconfig.data_format == 'channels_first' else -1),
      momentum=mconfig.bn_momentum,
      epsilon=mconfig.bn_epsilon,
      groups=mconfig.gn_groups)
  self._act = utils.get_act_fn(mconfig.act_fn)