def output_specific_fully_connected(self, feature_hidden,
                                    feature_hidden_size, dropout_rate,
                                    regularizer, is_training=True):
    original_feature_hidden = feature_hidden

    # Sequence inputs ([batch, sequence, size]) are flattened to
    # [batch * sequence, size] so the FC stack is applied per timestep.
    if len(original_feature_hidden.shape) > 2:
        feature_hidden = tf.reshape(feature_hidden,
                                    [-1, feature_hidden_size])

    if self.fc_layers is not None or self.num_fc_layers > 0:
        fc_stack = FCStack(
            layers=self.fc_layers,
            num_layers=self.num_fc_layers,
            default_fc_size=self.fc_size,
            default_activation=self.activation,
            default_norm=self.norm,
            default_dropout=self.dropout,
            default_regularize=self.regularize,
            default_initializer=self.initializer
        )
        feature_hidden = fc_stack(feature_hidden,
                                  feature_hidden_size,
                                  regularizer,
                                  dropout_rate,
                                  is_training=is_training)
        feature_hidden_size = feature_hidden.shape.as_list()[-1]

    # Restore the original [batch, sequence, size] shape.
    if len(original_feature_hidden.shape) > 2:
        sequence_length = tf.shape(original_feature_hidden)[1]
        feature_hidden = tf.reshape(
            feature_hidden,
            [-1, sequence_length, feature_hidden_size]
        )

    return feature_hidden, feature_hidden_size
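# A minimal standalone sketch of the flatten/restore trick used above,
# assuming TensorFlow 1.x. `hidden` stands in for a [batch, sequence, 256]
# feature tensor; the FC stack itself is elided and all names here are
# illustrative, not part of the module above.
import tensorflow as tf

hidden = tf.placeholder(tf.float32, [None, None, 256])
flat = tf.reshape(hidden, [-1, 256])  # [batch * sequence, 256]
# ... per-timestep fully connected layers would transform `flat` here ...
sequence_length = tf.shape(hidden)[1]
restored = tf.reshape(flat, [-1, sequence_length, 256])  # [batch, sequence, 256]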
def __init__(self, fc_layers=None, num_fc_layers=None, fc_size=256,
             norm=None, activation='relu', dropout=False, initializer=None,
             regularize=True, **kwargs):
    self.fc_stack = None

    # If only a number of layers is given, expand it into a list of
    # uniform layer definitions, e.g. num_fc_layers=2 becomes
    # [{'fc_size': 256}, {'fc_size': 256}].
    if fc_layers is None and num_fc_layers is not None:
        fc_layers = []
        for i in range(num_fc_layers):
            fc_layers.append({'fc_size': fc_size})

    if fc_layers is not None:
        self.fc_stack = FCStack(
            layers=fc_layers,
            num_layers=num_fc_layers,
            default_fc_size=fc_size,
            default_norm=norm,
            default_activation=activation,
            default_dropout=dropout,
            default_initializer=initializer,
            default_regularize=regularize
        )
def __init__(self, resnet_size=50, num_filters=16, kernel_size=3,
             conv_stride=1, first_pool_size=None, first_pool_stride=None,
             fc_layers=None, num_fc_layers=1, fc_size=256, norm=None,
             activation='relu', dropout=False, regularize=True,
             initializer=None, **kwargs):
    # Deeper variants (resnet_size >= 50) use bottleneck blocks,
    # shallower ones use standard building blocks.
    if resnet_size < 50:
        bottleneck = False
    else:
        bottleneck = True

    block_sizes = get_resnet_block_sizes(resnet_size)
    block_strides = [1, 2, 2, 2][:len(block_sizes)]

    self.resnet = ResNet(
        resnet_size,
        bottleneck,
        num_filters,
        kernel_size,
        conv_stride,
        first_pool_size,
        first_pool_stride,
        block_sizes,
        block_strides
    )
    self.fc_stack = FCStack(
        layers=fc_layers,
        num_layers=num_fc_layers,
        default_fc_size=fc_size,
        default_activation=activation,
        default_norm=norm,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )
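# For reference, a hypothetical version of the depth -> block-sizes lookup
# performed by get_resnet_block_sizes. The values below follow the standard
# ResNet configurations from the original paper; the actual table lives in
# the ResNet module and may differ.
RESNET_BLOCK_SIZES = {
    18: [2, 2, 2, 2],
    34: [3, 4, 6, 3],
    50: [3, 4, 6, 3],
    101: [3, 4, 23, 3],
    152: [3, 8, 36, 3],
}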
def __init__(self, fc_layers=None, num_fc_layers=1, fc_size=256, norm=None,
             activation='relu', dropout=False, regularize=True,
             initializer=None, **kwargs):
    self.fc_stack = FCStack(
        layers=fc_layers,
        num_layers=num_fc_layers,
        default_fc_size=fc_size,
        default_activation=activation,
        default_norm=norm,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )
def __init__(self, conv_layers=None, num_conv_layers=None, filter_size=3,
             num_filters=32, pool_size=2, stride=1, pool_stride=2,
             fc_layers=None, num_fc_layers=1, fc_size=128, norm=None,
             activation='relu', dropout=True, regularize=True,
             initializer=None, **kwargs):
    self.conv_stack_2d = ConvStack2D(
        layers=conv_layers,
        num_layers=num_conv_layers,
        default_filter_size=filter_size,
        default_num_filters=num_filters,
        default_pool_size=pool_size,
        default_activation=activation,
        default_stride=stride,
        default_pool_stride=pool_stride,
        default_norm=norm,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )
    self.fc_stack = FCStack(
        layers=fc_layers,
        num_layers=num_fc_layers,
        default_fc_size=fc_size,
        default_activation=activation,
        default_norm=norm,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )
def __init__(self, embedding_size=10, embeddings_on_cpu=False,
             dropout=False, fc_layers=None, num_fc_layers=0, fc_size=10,
             norm=None, activation='relu', initializer=None,
             regularize=True, **kwargs):
    """
    :param embedding_size: the maximum embedding size; the actual size
           will be `min(vocabulary_size, embedding_size)` for `dense`
           representations and exactly `vocabulary_size` for the
           `sparse` encoding, where `vocabulary_size` is the number of
           different strings appearing in the training set in the column
           the feature is named after (plus 1 for `<UNK>`).
    :type embedding_size: Integer
    :param embeddings_on_cpu: by default embedding matrices are stored
           on GPU memory if a GPU is used, as it allows for faster
           access, but in some cases the embedding matrix may be really
           big and this parameter forces the placement of the embedding
           matrix in regular memory, using the CPU to resolve lookups,
           slightly slowing down the process as a result of data
           transfer between CPU and GPU memory.
    :param dropout: determines if there should be a dropout layer before
           returning the encoder output.
    :type dropout: Boolean
    :param initializer: the initializer to use. If `None`, the default
           initializer of each variable is used (`glorot_uniform` in
           most cases). Options are: `constant`, `identity`, `zeros`,
           `ones`, `orthogonal`, `normal`, `uniform`,
           `truncated_normal`, `variance_scaling`, `glorot_normal`,
           `glorot_uniform`, `xavier_normal`, `xavier_uniform`,
           `he_normal`, `he_uniform`, `lecun_normal`, `lecun_uniform`.
           Alternatively it is possible to specify a dictionary with a
           key `type` that identifies the type of initializer and other
           keys for its parameters, e.g.
           `{type: normal, mean: 0, stddev: 0}`. To know the parameters
           of each initializer, please refer to TensorFlow's
           documentation.
    :type initializer: str
    :param regularize: if `True` the embedding weights are added to the
           set of weights that get regularized by a regularization loss
           (if the `regularization_lambda` in `training` is greater
           than 0).
    :type regularize: Boolean
    """
    # The numerical year component goes through a single 1-unit fully
    # connected layer instead of an embedding.
    self.year_fc = FCStack(
        num_layers=1,
        default_fc_size=1,
        default_activation=None,
        default_norm=None,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )

    # One embedding table per categorical date component.
    self.embed_month = Embed(
        [str(i) for i in range(12)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_day = Embed(
        [str(i) for i in range(31)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_weekday = Embed(
        [str(i) for i in range(7)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_yearday = Embed(
        [str(i) for i in range(366)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_hour = Embed(
        [str(i) for i in range(24)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_minute = Embed(
        [str(i) for i in range(60)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )
    self.embed_second = Embed(
        [str(i) for i in range(60)],
        embedding_size,
        representation='dense',
        embeddings_trainable=True,
        pretrained_embeddings=None,
        embeddings_on_cpu=embeddings_on_cpu,
        dropout=dropout,
        initializer=initializer,
        regularize=regularize
    )

    # Final FC stack applied after the component representations are
    # combined.
    self.fc_stack = FCStack(
        layers=fc_layers,
        num_layers=num_fc_layers,
        default_fc_size=fc_size,
        default_activation=activation,
        default_norm=norm,
        default_dropout=dropout,
        default_regularize=regularize,
        default_initializer=initializer
    )
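# Illustrative usage; the enclosing class is not named in this excerpt, so
# `DateEmbed` below is an assumed name for the date encoder this __init__
# belongs to. Parameters mirror the signature above.
encoder = DateEmbed(
    embedding_size=10,
    num_fc_layers=1,
    fc_size=10,
    dropout=False
)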