Example #1
    def build_network(self, inputs):
        batch_size = inputs.shape[0]
        g = self.build_conv(self.dimension, "g", inputs, bn=False)
        g = self.build_maxpool(self.dimension, g)
        g_inputs = flow.reshape(g, shape=[batch_size, self.inter_channels, -1])
        g_inputs = flow.transpose(g_inputs, perm=[0, 2, 1])

        theta = self.build_conv(self.dimension, "theta", inputs, bn=False)
        theta_inputs = flow.reshape(
            theta, shape=[batch_size, self.inter_channels, -1])
        theta_inputs = flow.transpose(theta_inputs, perm=[0, 2, 1])

        phi = self.build_conv(self.dimension, "phi", inputs, bn=False)
        phi = self.build_maxpool(self.dimension, phi)
        phi_inputs = flow.reshape(phi,
                                  shape=[batch_size, self.inter_channels, -1])

        f = flow.linalg.matmul(theta_inputs, phi_inputs)
        f = nn.softmax(f, axis=-1)

        y = flow.linalg.matmul(f, g_inputs)
        y = flow.transpose(y, perm=[0, 2, 1])
        y = flow.reshape(
            y, shape=[batch_size, self.inter_channels, *inputs.shape[2:]])

        W = self.build_conv(self.dimension, "W", y, bn=self.bn_layer)
        output = W + inputs
        return output
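The non-local block above reduces to a batched attention product over flattened spatial positions. A minimal NumPy sketch of that shape arithmetic (all sizes are illustrative assumptions, with max-pooling halving each spatial dim of g and phi):

import numpy as np

n, c_i, h, w = 2, 16, 8, 8                                    # batch, inter_channels, height, width
g = np.random.rand(n, c_i, (h * w) // 4).transpose(0, 2, 1)   # pooled g, flattened:  [N, HW/4, C_i]
theta = np.random.rand(n, c_i, h * w).transpose(0, 2, 1)      # theta, flattened:     [N, HW, C_i]
phi = np.random.rand(n, c_i, (h * w) // 4)                    # pooled phi:           [N, C_i, HW/4]

f = theta @ phi                                               # pairwise affinities:  [N, HW, HW/4]
f = np.exp(f) / np.exp(f).sum(axis=-1, keepdims=True)         # softmax over the last axis
y = (f @ g).transpose(0, 2, 1).reshape(n, c_i, h, w)          # restore spatial dims: [N, C_i, H, W]
assert y.shape == (n, c_i, h, w)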
Example #2
    def get_masked_lm_loss(
        logit_blob,
        masked_lm_positions,
        masked_lm_labels,
        label_weights,
        max_prediction_per_seq,
    ):
        # gather valid position indices
        logit_blob = flow.gather(
            logit_blob,
            index=masked_lm_positions.unsqueeze(2).repeat(
                1, 1, args.vocab_size),
            dim=1,
        )

        logit_blob = flow.reshape(logit_blob, [-1, args.vocab_size])
        label_id_blob = flow.reshape(masked_lm_labels, [-1])

        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        pre_example_loss = mlm_criterion(logit_blob, label_id_blob)
        pre_example_loss = flow.reshape(pre_example_loss,
                                        [-1, max_prediction_per_seq])
        numerator = flow.sum(pre_example_loss * label_weights)
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss
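The weighting at the end is what makes this loss padding-aware: label_weights zeroes out the loss of padded prediction slots before averaging. A small NumPy illustration with made-up numbers:

import numpy as np

per_example_loss = np.array([[2.0, 1.0, 3.0],         # [batch, max_prediction_per_seq]
                             [0.5, 4.0, 6.0]])
label_weights = np.array([[1.0, 1.0, 0.0],            # last slot of row 0 is padding
                          [1.0, 0.0, 0.0]])           # row 1 has one real prediction
numerator = (per_example_loss * label_weights).sum()  # 2.0 + 1.0 + 0.5 = 3.5
denominator = label_weights.sum() + 1e-5              # 3 real predictions
loss = numerator / denominator                        # ~1.1667; padded slots are ignored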
Example #3
 def predict(self, images, anchors_s, anchors_l):
     '''
     :param images: [N, 3, 416, 416]
     :param anchors_s: [anchor_per_scale, 2]
     :param anchors_l: [anchor_per_scale, 2]
     :return: [N, -1, 4+1+class_num]
         pred_bbox: [N, -1, 4]
         pred_conf: [N, -1, 1]
         pred_pred: [N, -1, class_num]
     '''
     conv_lbbox, conv_sbbox = self.network(images)
     conv_sbbox = flow.transpose(conv_sbbox, perm=[0, 2, 3, 1])
     conv_lbbox = flow.transpose(conv_lbbox, perm=[0, 2, 3, 1])
     pred_s, _ = self.decode(conv_sbbox,
                             anchors_s,
                             self.strides[0],
                             prefix='decode_s')
     pred_l, _ = self.decode(conv_lbbox,
                             anchors_l,
                             self.strides[1],
                             prefix='decode_l')
     pred_s = flow.reshape(pred_s, [pred_s.shape[0], -1, pred_s.shape[-1]])
     pred_l = flow.reshape(pred_l, [pred_l.shape[0], -1, pred_l.shape[-1]])
     pred = flow.concat([pred_s, pred_l], axis=-2)
     # pred_bbox = flow.slice(pred, begin=[None, None, 0], size=[None, None, 4])
     # pred_conf = flow.slice(pred, begin=[None, None, 4], size=[None, None, 1])
     # pred_pred = flow.slice(pred, begin=[None, None, 5], size=[None, None, pred.shape[-1]-5])
     # return pred_bbox, pred_conf, pred_pred
     return pred
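The two decoded scales are flattened per scale and concatenated along the box axis, so downstream code sees a single [N, -1, 4+1+class_num] tensor. A NumPy shape check with assumed sizes (26/13 grids, 3 anchors per scale, 80 classes):

import numpy as np

n, cls = 4, 80
pred_s = np.zeros((n, 26, 26, 3, 5 + cls)).reshape(n, -1, 5 + cls)  # [N, 2028, 85]
pred_l = np.zeros((n, 13, 13, 3, 5 + cls)).reshape(n, -1, 5 + cls)  # [N, 507, 85]
pred = np.concatenate([pred_s, pred_l], axis=-2)                    # [N, 2535, 85]
assert pred.shape == (n, 26 * 26 * 3 + 13 * 13 * 3, 5 + cls)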
Example #4
            def build(self):
                (
                    input_ids,
                    input_mask,
                    segment_ids,
                    start_positions,
                    end_positions,
                ) = self._decoders()
                input_ids = input_ids.to(device=device)
                input_mask = input_mask.to(device=device)
                segment_ids = segment_ids.to(device=device)
                start_positions = start_positions.to(device=device)
                end_positions = end_positions.to(device=device)

                start_logits, end_logits = self.squad_model(
                    input_ids, segment_ids, input_mask
                )
                start_logits = flow.reshape(start_logits, [-1, args.seq_length])
                end_logits = flow.reshape(end_logits, [-1, args.seq_length])

                start_loss = self.criterion(start_logits, start_positions.squeeze(1))
                end_loss = self.criterion(end_logits, end_positions.squeeze(1))
                total_loss = (start_loss + end_loss) * 0.5
                total_loss.backward()

                return total_loss
Example #5
def restsn(images, batch_size=1, trainable=False):
    num_seg = images.shape[0] // batch_size
    with flow.deprecated.variable_scope("base"):
        stem = layer0(images, trainable=trainable)
        feature = resnet_conv_x_body(stem, lambda x: x, trainable=trainable)
        # print('feature shape: {}'.format(feature.shape))
        pool = flow.nn.max_pool2d(feature,
                                  ksize=7,
                                  strides=1,
                                  padding="VALID",
                                  data_format="NCHW",
                                  name="gap")
        # print('pool shape: {}'.format(pool.shape))
        x = flow.reshape(pool,
                         shape=(-1, num_seg, pool.shape[1], pool.shape[2],
                                pool.shape[3]))

        # print('x shape: {}'.format(x.shape))
        consensus1 = flow.math.reduce_mean(x, axis=(1))
        # print('consensus shape: {}'.format(consensus1.shape))
        print(type(consensus1))
        consensus = flow.reshape(consensus1, (batch_size, 2048))
        output = flow.layers.dense(inputs=consensus,
                                   units=400,
                                   activation=None,
                                   use_bias=True,
                                   trainable=True,
                                   name="cls_head-fc_cls")
        print('output shape: {}'.format(output.shape))

    return output
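The key step is the segment-consensus reshape: pooled per-frame features are regrouped into [batch, num_seg, ...] and averaged over the segment axis. A NumPy sketch with illustrative sizes:

import numpy as np

batch_size, num_seg, feat = 2, 8, 2048
frame_features = np.random.rand(batch_size * num_seg, feat, 1, 1)  # pooled features, one row per frame
x = frame_features.reshape(-1, num_seg, feat, 1, 1)                # regroup frames by clip
consensus = x.mean(axis=1).reshape(batch_size, feat)               # average over segments
assert consensus.shape == (batch_size, 2048)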
Example #6
def get_masked_lm_loss(
    logit_blob,
    masked_lm_positions,
    masked_lm_labels,
    label_weights,
    max_prediction_per_seq=20,
):
    # gather valid position indices
    logit_blob = flow.gather(
        logit_blob,
        index=masked_lm_positions.unsqueeze(2).repeat(1, 1, 30522),
        dim=1,
    )
    logit_blob = flow.reshape(logit_blob, [-1, 30522])
    label_id_blob = flow.reshape(masked_lm_labels, [-1])

    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
    # tensor has a value of 1.0 for every real prediction and 0.0 for the
    # padding predictions.
    pre_example_loss = nn.CrossEntropyLoss(reduction="none")(logit_blob,
                                                             label_id_blob)
    pre_example_loss = flow.reshape(pre_example_loss,
                                    [-1, max_prediction_per_seq])
    sum_label_weight = flow.sum(label_weights, dim=-1)
    sum_label_weight = sum_label_weight / label_weights.shape[0]
    numerator = flow.sum(pre_example_loss * label_weights)
    denominator = flow.sum(label_weights) + 1e-5
    loss = numerator / denominator
    return logit_blob, loss
Example #7
def _dense_layer(
    inputs,
    units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=None,
    trainable=True,
    name=None,
):
    in_shape = inputs.shape
    in_num_axes = len(in_shape)
    assert in_num_axes >= 2

    name_prefix = name if name is not None else id_util.UniqueStr("Dense_")
    inputs = flow.reshape(inputs,
                          (-1, in_shape[-1])) if in_num_axes > 2 else inputs

    weight = flow.get_variable(
        name="{}-weight".format(name_prefix),
        shape=(units, inputs.shape[1]),
        dtype=inputs.dtype,
        initializer=(kernel_initializer if kernel_initializer is not None else
                     flow.constant_initializer(0)),
        trainable=trainable,
        model_name="weight",
    )
    weight = flow.identity(weight)
    weight = flow.repeat(weight, args.num_piece_in_batch)

    out = flow.matmul(
        a=inputs,
        b=weight,
        transpose_b=True,
        name="{}_matmul".format(name_prefix),
    )
    if use_bias:
        bias = flow.get_variable(
            name="{}-bias".format(name_prefix),
            shape=(units, ),
            dtype=inputs.dtype,
            initializer=(bias_initializer if bias_initializer is not None else
                         flow.constant_initializer(0)),
            trainable=trainable,
            model_name="bias",
        )

        bias = flow.identity(bias)
        bias = flow.repeat(bias, args.num_piece_in_batch)

        out = flow.nn.bias_add(out,
                               bias,
                               name="{}_bias_add".format(name_prefix))
    out = (activation(out, name="{}_activation".format(name_prefix))
           if activation is not None else out)
    out = flow.reshape(out, in_shape[:-1] +
                       (units, )) if in_num_axes > 2 else out

    return out
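Apart from the flow.repeat calls (pipeline-specific), _dense_layer follows the usual flatten-matmul-restore pattern for inputs with more than two axes. Sketched in NumPy with assumed sizes:

import numpy as np

units, in_features = 32, 64
inputs = np.random.rand(4, 10, in_features)      # [batch, seq, features], in_num_axes > 2
weight = np.random.rand(units, in_features)      # stored as (units, in_features)

flat = inputs.reshape(-1, in_features)           # collapse leading axes: [40, 64]
out = flat @ weight.T                            # matmul with transpose_b: [40, 32]
out = out.reshape(inputs.shape[:-1] + (units,))  # restore leading axes: [4, 10, 32]
assert out.shape == (4, 10, units)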
Example #8
 def forward(self, x):
     out0 = self.linear1(x)
     out0 = flow.reshape(out0, (-1, 2, 4))
     out0 = out0 + 1.0
     out0 = out0 * 2.0
     out0 = flow.reshape(out0, (-1, 8))
     out1 = self.linear2(out0)
     return out1
Example #9
def _model(dense_fields, wide_sparse_fields, deep_sparse_fields):
    wide_sparse_fields = flow.parallel_cast(
        wide_sparse_fields, distribute=flow.distribute.broadcast())
    wide_embedding_table = flow.get_variable(
        name='wide_embedding',
        shape=(FLAGS.wide_vocab_size, 1),
        initializer=flow.random_uniform_initializer(minval=-0.05, maxval=0.05),
        distribute=flow.distribute.split(0),
    )
    wide_embedding = flow.gather(params=wide_embedding_table,
                                 indices=wide_sparse_fields)
    wide_embedding = flow.reshape(wide_embedding,
                                  shape=(-1, wide_embedding.shape[-1] *
                                         wide_embedding.shape[-2]))
    wide_scores = flow.math.reduce_sum(wide_embedding, axis=[1], keepdims=True)
    wide_scores = flow.parallel_cast(
        wide_scores,
        distribute=flow.distribute.split(0),
        gradient_distribute=flow.distribute.broadcast())

    deep_sparse_fields = flow.parallel_cast(
        deep_sparse_fields, distribute=flow.distribute.broadcast())
    deep_embedding_table = flow.get_variable(
        name='deep_embedding',
        shape=(FLAGS.deep_vocab_size, FLAGS.deep_embedding_vec_size),
        initializer=flow.random_uniform_initializer(minval=-0.05, maxval=0.05),
        distribute=flow.distribute.split(1),
    )
    deep_embedding = flow.gather(params=deep_embedding_table,
                                 indices=deep_sparse_fields)
    deep_embedding = flow.parallel_cast(
        deep_embedding,
        distribute=flow.distribute.split(0),
        gradient_distribute=flow.distribute.split(2))
    deep_embedding = flow.reshape(deep_embedding,
                                  shape=(-1, deep_embedding.shape[-1] *
                                         deep_embedding.shape[-2]))
    deep_features = flow.concat([deep_embedding, dense_fields], axis=1)
    for idx, units in enumerate(DEEP_HIDDEN_UNITS):
        deep_features = flow.layers.dense(
            deep_features,
            units=units,
            kernel_initializer=flow.glorot_uniform_initializer(),
            bias_initializer=flow.constant_initializer(0.0),
            activation=flow.math.relu,
            name='fc' + str(idx + 1))
        deep_features = flow.nn.dropout(deep_features,
                                        rate=FLAGS.deep_dropout_rate)
    deep_scores = flow.layers.dense(
        deep_features,
        units=1,
        kernel_initializer=flow.glorot_uniform_initializer(),
        bias_initializer=flow.constant_initializer(0.0),
        name='fc' + str(len(DEEP_HIDDEN_UNITS) + 1))

    scores = wide_scores + deep_scores
    return scores
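Both branches use the same flatten after the embedding gather: [batch, num_fields, vec] collapses to [batch, num_fields * vec] before the reduce/dense layers. In NumPy, with assumed sizes:

import numpy as np

batch, num_fields, vec = 4, 26, 16
embedding = np.random.rand(batch, num_fields, vec)                       # gathered embeddings
flat = embedding.reshape(-1, embedding.shape[-1] * embedding.shape[-2])  # [batch, num_fields * vec]
assert flat.shape == (batch, num_fields * vec)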
Example #10
def PixelShuffle(input, r):
    b, c, h, w = input.shape
    assert c % (r * r) == 0
    new_c = int(c / (r * r))
    out = flow.reshape(input, [b, new_c, r * r, h, w])
    out = flow.reshape(out, [b * new_c, r, r, h, w])
    out = flow.transpose(out, [0, 3, 1, 4, 2])
    out = flow.reshape(out, [b, new_c, h * r, w * r])
    return out
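The same shuffle written in NumPy, cross-checked against the conventional six-axis formulation; both move each r*r group of channels into an r x r spatial tile:

import numpy as np

b, c, h, w, r = 2, 8, 3, 3, 2
x = np.random.rand(b, c, h, w)
new_c = c // (r * r)

# the formulation used above
out = x.reshape(b, new_c, r * r, h, w)
out = out.reshape(b * new_c, r, r, h, w)
out = out.transpose(0, 3, 1, 4, 2)        # [b*new_c, h, r, w, r]
out = out.reshape(b, new_c, h * r, w * r)

# conventional formulation: split channels, interleave with spatial axes
ref = x.reshape(b, new_c, r, r, h, w).transpose(0, 1, 4, 2, 5, 3)
ref = ref.reshape(b, new_c, h * r, w * r)
assert np.allclose(out, ref)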
Example #11
def _AddMaskedLanguageModelLoss(
    input_blob,
    output_weights_blob,
    positions_blob,
    label_id_blob,
    label_weight_blob,
    seq_length,
    hidden_size,
    vocab_size,
    max_predictions_per_seq,
    hidden_act,
    initializer_range,
):
    with flow.scope.namespace("other"):
        sum_label_weight_blob = flow.math.reduce_sum(label_weight_blob, axis=[-1])
        ones = sum_label_weight_blob * 0.0 + 1.0
        sum_label_weight_blob = flow.math.reduce_sum(sum_label_weight_blob)
        batch_size = flow.math.reduce_sum(ones)
        sum_label_weight_blob = sum_label_weight_blob / batch_size
    with flow.scope.namespace("cls-predictions"):
        input_blob = _GatherIndexes(input_blob, positions_blob, seq_length, hidden_size)
        with flow.scope.namespace("transform"):
            if callable(hidden_act):
                act_fn = op_conf_util.kNone
            else:
                act_fn = hidden_act
            input_blob = bert_util._FullyConnected(
                input_blob,
                input_size=hidden_size,
                units=hidden_size,
                activation=act_fn,
                weight_initializer=bert_util.CreateInitializer(initializer_range),
                name="dense",
            )
            if callable(hidden_act):
                input_blob = hidden_act(input_blob)
                input_blob = bert_util._LayerNorm(input_blob, hidden_size)
        output_bias = flow.get_variable(
            name="output_bias",
            shape=[vocab_size],
            dtype=input_blob.dtype,
            initializer=flow.constant_initializer(1.0),
        )
        logit_blob = flow.matmul(input_blob, output_weights_blob, transpose_b=True)
        logit_blob = flow.nn.bias_add(logit_blob, output_bias)
        label_id_blob = flow.reshape(label_id_blob, [-1])
        pre_example_loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logit_blob, labels=label_id_blob
        )
        pre_example_loss = flow.reshape(pre_example_loss, [-1, max_predictions_per_seq])
        numerator = pre_example_loss * label_weight_blob
        with flow.scope.namespace("loss"):
            numerator = flow.math.reduce_sum(numerator, axis=[-1])
            denominator = sum_label_weight_blob + 1e-5
            loss = numerator / denominator
        return loss, pre_example_loss, logit_blob
Example #12
    def forward(self, input_ids, token_type_ids, attention_mask):
        sequence_output, _ = self.bert(input_ids, token_type_ids,
                                       attention_mask)
        final_hidden = flow.reshape(sequence_output, [-1, self.hidden_size])

        prediction_logits = self.cls_squad(final_hidden)
        prediction_logits = flow.reshape(prediction_logits,
                                         [-1, self.seq_length, 2])
        start_logits = prediction_logits[:, :, 0]
        end_logits = prediction_logits[:, :, 1]
        return start_logits, end_logits
Example #13
    def val_faceseg_job(image=flow.FixedTensorDef((val_batch_size, 3, img_height, img_width), dtype=flow.float),
                        mask=flow.FixedTensorDef((val_batch_size, 1, img_height, img_width), dtype=flow.float)
    ):

        feature = LinkNet34(image, trainable=False, batch_size=batch_size)  # use LinkNet34 model to segment face

        feature = flow.reshape(feature, [-1])
        mask = flow.reshape(mask, [-1])

        loss = BinaryLoss(feature, mask, jaccard_weight=jaccard_weight)  # loss function

        return loss, feature
Example #14
def _CreateAttentionMaskFromInputMask(to_mask_blob, from_seq_length, to_seq_length):
    output = flow.cast(to_mask_blob, dtype=flow.float)
    output = flow.reshape(output, [-1, 1, to_seq_length])
    zeros = flow.constant(0.0, dtype=flow.float, shape=[from_seq_length, to_seq_length])
    attention_mask_blob = zeros + output
    attention_mask_blob = flow.reshape(
        attention_mask_blob, [-1, 1, from_seq_length, to_seq_length]
    )
    attention_mask_blob = flow.cast(attention_mask_blob, dtype=flow.float)
    addr_blob = (attention_mask_blob - 1.0) * 10000.0

    return addr_blob
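The returned addr_blob is an additive attention bias: valid positions (mask value 1) map to 0.0 and padded positions (mask value 0) map to -10000.0, which drives their softmax weight toward zero. A NumPy sketch:

import numpy as np

from_seq, to_seq = 4, 4
to_mask = np.array([[1, 1, 1, 0]], dtype=np.float32)                  # [batch, to_seq]; last token is padding
mask = np.zeros((from_seq, to_seq)) + to_mask.reshape(-1, 1, to_seq)  # broadcast to [batch, from, to]
addr = (mask - 1.0) * 10000.0                                         # 0.0 for valid, -10000.0 for padding
assert addr[0, 0, 0] == 0.0 and addr[0, 0, -1] == -10000.0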
Example #15
    def dense(
        cls,
        input,
        units,
        name,
        use_bias=False,
        trainable=True,
        reuse=False,
        const_init=False,
    ):
        name_ = name if not reuse else name + "_reuse"

        in_shape = input.shape
        in_num_axes = len(in_shape)
        assert in_num_axes >= 2

        inputs = flow.reshape(input,
                              (-1, in_shape[-1])) if in_num_axes > 2 else input

        weight = flow.get_variable(
            name="{}-weight".format(name),
            shape=(units, inputs.shape[1]),
            dtype=inputs.dtype,
            initializer=flow.random_normal_initializer(stddev=0.02)
            if not const_init else flow.constant_initializer(0.002),
            trainable=trainable,
            model_name="weight",
            reuse=reuse,
        )

        out = flow.matmul(
            a=inputs,
            b=weight,
            transpose_b=True,
            name=name_ + "matmul",
        )

        if use_bias:
            bias = flow.get_variable(
                name="{}-bias".format(name),
                shape=(units, ),
                dtype=inputs.dtype,
                initializer=flow.random_normal_initializer()
                if not const_init else flow.constant_initializer(0.002),
                trainable=trainable,
                model_name="bias",
                reuse=reuse,
            )
            out = flow.nn.bias_add(out, bias, name=name_ + "_bias_add")

        out = flow.reshape(out, in_shape[:-1] +
                           (units, )) if in_num_axes > 2 else out
        return out
Example #16
    def get_extended_attention_mask(
        self, attention_mask, from_seq_length, to_seq_length
    ):
        output = flow.cast(attention_mask, dtype=flow.float32)
        output = flow.reshape(output, [-1, 1, to_seq_length])
        # broadcast `from_tensor` from 2D to 3D
        output = output.expand(-1, from_seq_length, -1)

        attention_mask = flow.reshape(output, [-1, 1, from_seq_length, to_seq_length])
        attention_mask = flow.cast(attention_mask, dtype=flow.float32)
        addr_blob = (attention_mask - 1.0) * 10000.0
        return addr_blob
Example #17
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape
    x = flow.reshape(x, [batchsize, groups, channels_per_group, height, width])

    x = flow.transpose(x, 1, 2)

    # flatten
    x = flow.reshape(x, [batchsize, -1, height, width])

    return x
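The reshape/transpose/reshape trio interleaves channels across groups. A NumPy sketch: with groups=2, channel order [0 1 2 3 4 5] becomes [0 3 1 4 2 5]:

import numpy as np

b, c, h, w, groups = 1, 6, 1, 1, 2
x = np.arange(b * c * h * w).reshape(b, c, h, w)
out = x.reshape(b, groups, c // groups, h, w)
out = out.transpose(0, 2, 1, 3, 4)        # swap the group axis and the per-group axis
out = out.reshape(b, -1, h, w)
assert out[0, :, 0, 0].tolist() == [0, 3, 1, 4, 2, 5]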
Example #18
def SQuAD(
    input_ids_blob,
    input_mask_blob,
    token_type_ids_blob,
    vocab_size,
    seq_length=512,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    initializer_range=0.02,
):

    backbone = bert_util.BertBackbone(
        input_ids_blob=input_ids_blob,
        input_mask_blob=input_mask_blob,
        token_type_ids_blob=token_type_ids_blob,
        vocab_size=vocab_size,
        seq_length=seq_length,
        hidden_size=hidden_size,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
        intermediate_size=intermediate_size,
        hidden_act=hidden_act,
        hidden_dropout_prob=hidden_dropout_prob,
        attention_probs_dropout_prob=attention_probs_dropout_prob,
        max_position_embeddings=max_position_embeddings,
        type_vocab_size=type_vocab_size,
        initializer_range=initializer_range,
    )

    with flow.scope.namespace("cls-squad"):
        final_hidden = backbone.sequence_output()
        final_hidden_matrix = flow.reshape(final_hidden, [-1, hidden_size])
        logits = bert_util._FullyConnected(
            final_hidden_matrix,
            hidden_size,
            units=2,
            weight_initializer=bert_util.CreateInitializer(initializer_range),
            name='output')
        logits = flow.reshape(logits, [-1, seq_length, 2])

        start_logits = flow.slice(logits, [None, None, 0], [None, None, 1])
        end_logits = flow.slice(logits, [None, None, 1], [None, None, 1])

    return start_logits, end_logits
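The final slices split the per-token 2-way logits: the last axis of [batch, seq, 2] carries one start logit and one end logit per token. A NumPy shape sketch:

import numpy as np

batch, seq = 2, 6
logits = np.random.rand(batch, seq, 2)
start_logits = logits[:, :, 0:1]          # [batch, seq, 1]
end_logits = logits[:, :, 1:2]            # [batch, seq, 1]
assert start_logits.shape == end_logits.shape == (batch, seq, 1)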
Example #19
 def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
     reshape_out1 = flow.reshape(x, (-1, 20))
     my_model = flow.get_variable(
         "my_model",
         shape=(20, 32),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
         trainable=True,
     )
     my_model = flow.cast_to_current_logical_view(my_model)
     mm_out = flow.matmul(reshape_out1, my_model)
     reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
     flow.losses.add_loss(reshape_out2)
     return reshape_out1
Example #20
 def loss_div(self, feature):
     branch = feature
     branch = flow.reshape(branch, (branch.shape[0], branch.shape[1],
                                    branch.shape[2] * branch.shape[3]))
     branch = flow.nn.softmax(branch, 2)
     branch = flow.reshape(branch, (branch.shape[0], branch.shape[1],
                                    feature.shape[2], feature.shape[3]))
     branch = self.ccmp(branch,
                        kernel_size=(1, self.cnum),
                        stride=(1, self.cnum))
     branch = flow.reshape(branch, (branch.shape[0], branch.shape[1],
                                    branch.shape[2] * branch.shape[3]))
     loss_dis = 1.0 - 1.0 * flow.math.reduce_mean(
         flow.math.reduce_sum(branch, 2)) / self.cnum  # set margin = 3.0
     return loss_dis
Example #21
def _CreateAddrFromAttentionMask(attention_mask_blob, from_seq_length,
                                 to_seq_length):
    attention_mask_blob = flow.reshape(attention_mask_blob,
                                       [-1, 1, from_seq_length, to_seq_length])
    attention_mask_blob = flow.cast(attention_mask_blob, dtype=flow.float)
    addr_blob = (attention_mask_blob - 1.0) * 10000.0
    return addr_blob
Example #22
    def linear(self, x):
        """ Computes logits by running x through a linear layer.

            Args:
              x: A float32 tensor with shape [batch_size, length, hidden_size]
            Returns:
              float32 tensor with shape [batch_size, length, vocab_size].
        """
        with flow.scope.namespace("presoftmax_linear"):
            batch_size = x.shape[0]
            length = x.shape[1]

            x = flow.reshape(x, [-1, self.hidden_size])
            logits = flow.matmul(x, self.embedding_table, transpose_b=True)

            return flow.reshape(logits, [batch_size, length, self.vocab_size])
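linear reuses the embedding table as the output projection (weight tying): hidden states are flattened, multiplied against the transposed table, then reshaped back to [batch, length, vocab]. In NumPy, with assumed sizes:

import numpy as np

batch, length, hidden, vocab = 2, 5, 8, 100
x = np.random.rand(batch, length, hidden)
embedding_table = np.random.rand(vocab, hidden)
logits = (x.reshape(-1, hidden) @ embedding_table.T).reshape(batch, length, vocab)
assert logits.shape == (batch, length, vocab)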
Example #23
def resnet50(images, args, trainable=True, training=True):
    weight_regularizer = flow.regularizers.l2(
        args.wd) if args.wd > 0.0 and args.wd < 1.0 else None
    builder = ResnetBuilder(weight_regularizer, trainable, training,
                            args.channel_last, args.fuse_bn_relu,
                            args.fuse_bn_add_relu)

    with flow.scope.namespace("Resnet"):
        stem = builder.resnet_stem(images)
        body = builder.resnet_conv_x_body(stem)
        pool5 = flow.nn.avg_pool2d(
            body,
            ksize=7,
            strides=1,
            padding="VALID",
            data_format=builder.data_format,
            name="pool5",
        )
        fc1001 = flow.layers.dense(
            flow.reshape(pool5, (pool5.shape[0], -1)),
            units=1000,
            use_bias=True,
            kernel_initializer=flow.variance_scaling_initializer(
                2, 'fan_in', 'random_normal'),
            bias_initializer=flow.zeros_initializer(),
            kernel_regularizer=weight_regularizer,
            bias_regularizer=weight_regularizer,
            trainable=trainable,
            name="fc1001",
        )
    return fc1001
Example #24
 def __call__(self, pic):
     # handle PIL Image
     if pic.mode == 'I':
         img = np.array(pic, np.int32, copy=False)
     elif pic.mode == 'I;16':
         img = np.array(pic, np.int16, copy=False)
     else:
         img = np.frombuffer(pic.tobytes(), dtype=np.byte)
     # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
     if pic.mode == 'YCbCr':
         nchannel = 3
     elif pic.mode == 'I;16':
         nchannel = 1
     else:
         nchannel = len(pic.mode)
     img = flow.reshape(img, shape=[pic.size[1], pic.size[0], nchannel])
     # put it from HWC to CHW format
     # yikes, this transpose takes 80% of the loading time/CPU
     img = np.swapaxes(img, 0, 1)
     img = np.swapaxes(img, 0, 2)
     if isinstance(pic, np.ndarray):
         # handle numpy array
         img = flow.transpose(img, perm=[2, 0, 1])
         img = img.astype(np.float64)
         img = img / self.norm_value
         return img
     else:
         return img
Example #25
def _GatherIndexes(sequence_blob, positions_blob, seq_length, hidden_size):
    output = flow.gather(params=sequence_blob,
                         indices=positions_blob,
                         axis=2,
                         batch_dims=2)
    output = flow.reshape(output, [-1, hidden_size])
    return output
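_GatherIndexes picks, for each sequence, the hidden states at the masked positions, then flattens them to [-1, hidden_size]. An equivalent NumPy sketch using take_along_axis (sizes are illustrative):

import numpy as np

batch, seq_len, hidden = 2, 4, 3
sequence = np.arange(batch * seq_len * hidden).reshape(batch, seq_len, hidden)
positions = np.array([[0, 2], [1, 3]])                                  # masked positions per sequence
gathered = np.take_along_axis(sequence, positions[:, :, None], axis=1)  # [batch, num_pos, hidden]
output = gathered.reshape(-1, hidden)                                   # [batch * num_pos, hidden]
assert output.shape == (batch * 2, hidden)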
Example #26
 def discriminator(self, img, const_init=False, trainable=True, reuse=False):
     # (n, 1, 28, 28)
     h0 = layers.conv2d(
         img,
         64,
         5,
         name="d_conv1",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h0 = flow.nn.leaky_relu(h0, 0.3)
     # h0 = flow.nn.dropout(h0, rate=0.3)
     # (n, 64, 14, 14)
     h1 = layers.conv2d(
         h0,
         128,
         5,
         name="d_conv2",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h1 = flow.nn.leaky_relu(h1, 0.3)
     # h1 = flow.nn.dropout(h1, rate=0.3)
     # (n, 128 * 7 * 7)
     out = flow.reshape(h1, (self.batch_size, -1))
     # (n, 1)
     out = layers.dense(
         out, 1, name="d_fc", const_init=const_init, trainable=trainable, reuse=reuse
     )
     return out
Example #27
def lenet(data, train=False):
    initializer = flow.truncated_normal(0.1)
    conv1 = flow.layers.conv2d(
        data,
        32,
        5,
        padding="SAME",
        activation=flow.nn.relu,
        name="conv1",
        kernel_initializer=initializer,
    )
    pool1 = flow.nn.max_pool2d(conv1, ksize=2, strides=2, padding="SAME", name="pool1")
    conv2 = flow.layers.conv2d(
        pool1,
        64,
        5,
        padding="SAME",
        activation=flow.nn.relu,
        name="conv2",
        kernel_initializer=initializer,
    )
    pool2 = flow.nn.max_pool2d(conv2, ksize=2, strides=2, padding="SAME", name="pool2")
    reshape = flow.reshape(pool2, [pool2.shape[0], -1])
    hidden = flow.layers.dense(
        reshape,
        512,
        activation=flow.nn.relu,
        kernel_initializer=initializer,
        name="dense1",
    )
    if train:
        hidden = flow.nn.dropout(hidden, rate=0.5, name="dropout")
    return flow.layers.dense(hidden, 10, kernel_initializer=initializer, name="dense2")
Example #28
 def DynamicReshapeJob(x: oft.ListNumpy.Placeholder(data_shape)):
     reshape_out1 = flow.reshape(x, (-1, 20))
     my_model = flow.get_variable(
         "my_model",
         shape=(20, 32),
         dtype=flow.float,
         initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
         trainable=True,
     )
     my_model = flow.cast_to_current_logical_view(my_model)
     mm_out = flow.matmul(reshape_out1, my_model)
     reshape_out2 = flow.reshape(mm_out, (-1, 8, 4))
     flow.optimizer.SGD(
         flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
     ).minimize(reshape_out2)
     return reshape_out1
Example #29
    def forward(self, inputs: flow.Tensor) -> flow.Tensor:
        net = self.conv2d_1a_3x3(inputs)
        net = self.conv2d_2a_3x3(net)
        net = self.conv2d_2b_3x3(net)
        net = self.MaxPool_3a_3x3(net)
        net = self.conv2d_3b_1x1(net)
        net = self.conv2d_4a_3x3(net)
        net = self.MaxPool_5a_3x3(net)  # stem

        net = self.Mixed_5b(net)
        net = self.block35(net)

        netB1 = self.conv_ls1(net)
        netB1 = self.MaxPool_3x3_ls1(netB1)

        net = self.Mixed_6a(net)
        net = self.block17(net)

        netB2 = self.conv_ls2(net)
        net = self.Mixed_7a(net)
        net = self.block8(net)

        netB3 = [netB1, netB2, net]

        netAll = flow.cat(netB3, 1)
        netAll = self.conv_ls3(netAll)

        net = self.Conv2d_7b_1x1(netAll)
        net = self.AvgPool_1a_8x8(net)
        net = flow.reshape(net, [net.shape[0], -1])

        hidden = self.dense(net)
        hidden = self.relu(hidden)

        return hidden
Example #30
def train_job(
    images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
    labels: tp.Numpy.Placeholder((BATCH_SIZE, ), dtype=flow.int32),
) -> tp.Numpy:
    with flow.scope.placement("cpu", "0:0"):
        initializer = flow.truncated_normal(0.1)
        reshape = flow.reshape(images, [images.shape[0], -1])
        hidden = flow.layers.dense(
            reshape,
            512,
            activation=flow.nn.relu,
            kernel_initializer=initializer,
            name="dense1",
        )
        dense2 = flow.layers.dense(hidden,
                                   10,
                                   kernel_initializer=initializer,
                                   name="dense2")

        dense3 = flow.layers.dense(dense2,
                                   10,
                                   kernel_initializer=initializer,
                                   name="dense3")
        loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, dense3)

    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
    flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)

    return loss