def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False)

    # TODO: Create the model and train it
    model = ...

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_classification.txt"),
              "w",
              encoding="utf-8") as predictions_file:
        # TODO: Predict the probabilities on the test set
        test_probabilities = model.predict(...)

        for probs in test_probabilities:
            print(np.argmax(probs), file=predictions_file)
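One way to fill in the `model = ...` TODO above is to put a small classification head on a frozen backbone. A minimal sketch, assuming (as the later examples on this page suggest) that `efficientnet_b0.outputs[0]` is the globally pooled feature vector and that `CAGS` exposes `H`, `W`, `C` and a `LABELS` list:

# Hedged sketch only, under the assumptions stated above.
efficientnet_b0.trainable = False  # keep the pretrained backbone frozen

inputs = tf.keras.layers.Input(shape=[CAGS.H, CAGS.W, CAGS.C])
features = efficientnet_b0(inputs)[0]  # assumed: the first output is the pooled features
hidden = tf.keras.layers.Dropout(0.2)(features)
outputs = tf.keras.layers.Dense(len(CAGS.LABELS), activation="softmax")(hidden)

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")],
)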
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=True)

    for image_path in args.images:
        # Load the file
        with open(image_path, "rb") as image_file:
            image = tf.image.decode_image(image_file.read(), channels=3, dtype=tf.float32)

        # Resize to 224x224
        image = tf.image.resize(image, size=(224, 224))

        # Compute the prediction
        start = time.time()
        [prediction], *_ = efficientnet_b0.predict(tf.expand_dims(image, 0))
        print("Image {} [{} ms]: label {}".format(
            image_path,
            1000 * (time.time() - start),
            imagenet_classes.imagenet_classes[tf.argmax(prediction)]
        ))
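This snippet assumes an argument parser providing `args.seed`, `args.threads` and `args.images`. A minimal sketch of such a parser (argument names and defaults here are illustrative, not taken from the original template):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("images", nargs="+", type=str, help="Image files to classify.")
parser.add_argument("--seed", default=42, type=int, help="Random seed.")
parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")

if __name__ == "__main__":
    main(parser.parse_args())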
Example 3
def main(args: argparse.Namespace) -> None:
    # Fix random seeds and threads
    tf.keras.utils.set_random_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", k), v)
                 for k, v in sorted(vars(args).items())))))

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False)

    # TODO: Create the model and train it
    model = ...

    # Generate test set annotations, but in `args.logdir` to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_segmentation.txt"),
              "w",
              encoding="utf-8") as predictions_file:
        # TODO: Predict the masks on the test set
        test_masks = model.predict(...)

        for mask in test_masks:
            zeros, ones, runs = 0, 0, []
            for pixel in np.reshape(mask >= 0.5, [-1]):
                if pixel:
                    if zeros or (not zeros and not ones):
                        runs.append(zeros)
                        zeros = 0
                    ones += 1
                else:
                    if ones:
                        runs.append(ones)
                        ones = 0
                    zeros += 1
            runs.append(zeros + ones)
            print(*runs, file=predictions_file)
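The loop above is a run-length encoding of the thresholded mask: the output always starts with the length of the initial background (zero) run, possibly 0, then alternates foreground and background run lengths, ending with the length of the final run. A small self-contained check of the same logic on toy masks (not assignment data):

import numpy as np

def rle_encode(mask):
    # Same logic as the loop above, factored into a helper for illustration.
    zeros, ones, runs = 0, 0, []
    for pixel in np.reshape(mask >= 0.5, [-1]):
        if pixel:
            if zeros or (not zeros and not ones):
                runs.append(zeros)
                zeros = 0
            ones += 1
        else:
            if ones:
                runs.append(ones)
                ones = 0
            zeros += 1
    runs.append(zeros + ones)
    return runs

print(rle_encode(np.array([0, 0, 1, 1, 1, 0])))  # [2, 3, 1]
print(rle_encode(np.array([1, 1, 0, 0])))        # [0, 2, 2]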
Example 4
    def efficient_net(self, args):

        efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
            include_top=False, drop_connect=args.drop_connect)

        for lidx, layer in enumerate(efficientnet_b0.layers):
            if ('conv' in layer.name or 'expand' in layer.name
                    or 'reduce' in layer.name):
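                # NOTE: assigning `kernel_regularizer` to an already built layer does
                # not by itself register a regularization loss; the model typically has
                # to be rebuilt from its config for the setting to take effect.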
                layer.kernel_regularizer = tf.keras.regularizers.l2(args.l2)

            # Keep dropout and normalization layers trainable ('bn' matches the batch
            # normalization layers), freeze everything else.
            if ('drop' in layer.name or 'bn' in layer.name
                    or 'gn' in layer.name):
                layer.trainable = True
            else:
                layer.trainable = False

        return efficientnet_b0
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # Load the data
    svhn = SVHN()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False)

    # TODO: Create the model and train it
    model = ...

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "svhn_competition.txt"),
              "w",
              encoding="utf-8") as predictions_file:
        # TODO: Predict the digits and their bounding boxes on the test set.
        # Assume that for a single test image we get
        # - `predicted_classes`: a 1D array with the predicted digits,
        # - `predicted_bboxes`: a [len(predicted_classes), 4] array with bboxes;
        for predicted_classes, predicted_bboxes in ...:
            output = []
            for label, bbox in zip(predicted_classes, predicted_bboxes):
                output += [label] + list(bbox)
            print(*output, file=predictions_file)
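For illustration, a toy run of the output formatting above: each test image produces one line with the predicted label followed by its four bounding-box coordinates, repeated per detected digit (all values below are made up):

predicted_classes = [1, 7]
predicted_bboxes = [[10, 20, 50, 60], [10, 65, 50, 105]]

output = []
for label, bbox in zip(predicted_classes, predicted_bboxes):
    output += [label] + list(bbox)
print(*output)  # 1 10 20 50 60 7 10 65 50 105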
Example 6
    def __init__(self, args):
        efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
            include_top=False)

        hidden = efficientnet_b0.outputs

        # MASK LAYER
        # Pyramid block 1
        #bn1 = layers.BatchNormalization()(hidden[1])
        #conv1 = layers.Conv2D(filters=64, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn1)
        #bn2 = layers.BatchNormalization()(conv1)
        #conv2 = layers.Conv2D(filters=64, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn2)
        #shortcut1 = layers.Conv2D(filters=64, kernel_size=(1,1), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn1)
        #out1 = layers.add([conv2, shortcut1])

        # Pyramid block 2
        #bn3 = layers.BatchNormalization()(out1)
        #conv3 = layers.Conv2D(filters=32, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn3)
        #bn4 = layers.BatchNormalization()(conv3)
        #conv4 = layers.Conv2D(filters=32, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn4)
        #shortcut2 = layers.Conv2D(filters=32, kernel_size=(1,1), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn3)
        #out2 = layers.add([conv4, shortcut2])

        #bn5 = layers.BatchNormalization()(out2)
        #conv5 = layers.Conv2D(filters=64, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn5)
        #bn6 = layers.BatchNormalization()(conv5)
        #conv6 = layers.Conv2D(filters=64, kernel_size=(3,3), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn6)
        #shortcut3 = layers.Conv2D(filters=64, kernel_size=(1,1), strides = 1, padding='same', use_bias=False, activation = tf.nn.relu)(bn5)
        #out3 = layers.add([conv6, shortcut3])
        #tmp = tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=2)(out3)
        #tmp = layers.BatchNormalization()(tmp)

        EFNet_out = efficientnet_b0.outputs[1:][::-1]
        down_stack = tf.keras.Model(inputs=efficientnet_b0.inputs,
                                    outputs=EFNet_out)

        print(EFNet_out)

        inputs = layers.Input(shape=[224, 224, 3])

        x = inputs
        skips = down_stack(x)
        print(skips)

        x = skips[-1]
        skips = reversed(skips[:-1])

        print(skips)

        up_stack = [
            upsample(512, 3),
            upsample(256, 3),
            upsample(128, 3),
            upsample(64, 3)
        ]
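        # NOTE: `upsample(filters, size)` used above is not defined in this snippet;
        # it is presumably a helper along the lines of the TensorFlow pix2pix tutorial
        # (Conv2DTranspose with stride 2 + BatchNormalization + ReLU).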

        for up, skip in zip(up_stack, skips):
            x = up(x)
            x = layers.Concatenate()([x, skip])

        # x = layers.BatchNormalization()(x)
        masks_layer = layers.Conv2DTranspose(filters=1,
                                             kernel_size=3,
                                             strides=2,
                                             padding="same",
                                             use_bias=True,
                                             activation=tf.nn.sigmoid)(x)

        super().__init__(inputs=inputs, outputs=[masks_layer])

        opt = tf.keras.optimizers.RMSprop(args.opt[1]) if args.opt[0] == 'rms' \
            else tf.keras.optimizers.Adam()

        self.compile(
            optimizer=opt,
            loss=[
                #tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
                tf.keras.losses.BinaryCrossentropy()
            ],
            metrics=[CAGSMaskIoU()])
    # Report only errors by default
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Create logdir name
    args.logdir = os.path.join("logs", "{}-{}-{}".format(
        os.path.basename(globals().get("__file__", "notebook")),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
    ))

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=False)


    def train_augment(e):
        image = e["image"]
        label = e["label"]
        if tf.random.uniform([]) >= 0.5:
            image = tf.image.flip_left_right(image)
        image = tf.image.resize_with_crop_or_pad(image, CAGS.H + 12, CAGS.W + 12)
        image = tf.image.resize(image, [tf.random.uniform([], minval=CAGS.H, maxval=CAGS.H + 24, dtype=tf.int32),
                                        tf.random.uniform([], minval=CAGS.W, maxval=CAGS.W + 24, dtype=tf.int32)])
        image = tf.image.random_crop(image, [CAGS.H, CAGS.W, CAGS.C])
        return image, label


    train = cags.train.map(CAGS.parse)
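    # Hypothetical continuation (the snippet is truncated at this point): apply the
    # augmentation and build the batched pipeline, mirroring the other examples here.
    train = train.map(train_augment)
    train = train.shuffle(5000, seed=args.seed)
    train = train.batch(args.batch_size)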
Example 8
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join("logs", "{}-{}-{}".format(
        os.path.basename(globals().get("__file__", "notebook")),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
    ))

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    # Return with image features, not with classification layer
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=False)

    def without_mask(x):
        return x["image"], x["label"]

    train = cags.train
    train = train.map(without_mask)

    # Collect the training labels to determine the number of classes and the set size.
    labels = [label for _, label in train]
    # output classification size
    output_size = max(labels).numpy() + 1
    # length of training data - 2142 rows
    set_size = len(labels)

    train = train.shuffle(set_size, seed=args.seed)
    train = train.batch(args.batch_size)

    dev = cags.dev
    dev = dev.map(without_mask)
    dev = dev.batch(args.batch_size)

    test = cags.test
    test = test.map(without_mask)
    test = test.batch(args.batch_size)

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    # Add a hidden dense layer followed by the softmax classification layer
    inter_input = tf.keras.layers.Dense(5000, activation='relu')(efficientnet_b0.outputs[0])
    output = tf.keras.layers.Dense(output_size, activation='softmax')(inter_input)

    # TODO: Create the model and train it
    model = tf.keras.Model(inputs=efficientnet_b0.inputs, outputs=output)
    # model.summary()

    # Define a learning rate decay
    initial_lr = 0.0001
    final_lr = 0.00000001
    lr = tf.keras.optimizers.schedules.PolynomialDecay(initial_lr, args.epochs * set_size / args.batch_size, final_lr)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    )
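    # Note: `shuffle=True` below has no effect because the input is a tf.data.Dataset;
    # shuffling is handled by the `train.shuffle(...)` call above.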

    model.fit(
        train,
        shuffle=True,
        epochs=args.epochs,
        validation_data=dev, verbose=2
    )

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_classification.txt"), "w", encoding="utf-8") as predictions_file:
        # TODO: Predict the probabilities on the test set
        test_probabilities = model.predict(test)

        for probs in test_probabilities:
            print(np.argmax(probs), file=predictions_file)
Example 9
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # Load the data
    svhn = SVHN()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False, dynamic_shape=False)

    # TODO: Create the model and train it
    model = ...

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "svhn_classification.txt"),
              "w",
              encoding="utf-8") as out_file:
        # TODO: Predict the digits and their bounding boxes on the test set.
        for prediction in model.predict(...):
            # Assume that for the given prediction we get its
            # - `predicted_classes`: a 1D array with the predicted digits,
            # - `predicted_bboxes`: a [len(predicted_classes), 4] array with bboxes;
            # We can then generate the required output by
            output = []
            for label, bbox in zip(predicted_classes, predicted_bboxes):
                output += [label] + list(bbox)
            print(*output, file=out_file)
Example 10
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # Load the data
    cags = CAGS()

    def with_mask(x):
        return x["image"], x["mask"]

    train = cags.train
    train = train.map(with_mask)
    train = train.shuffle(5000, seed=args.seed)
    train = train.batch(args.batch_size)

    dev = cags.dev
    dev = dev.map(with_mask)
    dev = dev.batch(args.batch_size)

    test = cags.test
    test = test.map(with_mask)
    test = test.batch(args.batch_size)

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False)
    efficientnet_b0.trainable = False

    inputs = tf.keras.layers.Input([cags.H, cags.W, cags.C])

    # Use the backbone's intermediate feature maps (all outputs except the first,
    # which is the final pooled EfficientNet output).
    features = efficientnet_b0(inputs)

    # Lets use features for reconstruction of segmentation.
    f = features[1]
    f = tf.keras.layers.Conv2D(filters=128,
                               kernel_size=1,
                               padding='same',
                               use_bias=False)(f)
    f = tf.keras.layers.BatchNormalization()(f)
    f = tf.keras.layers.ReLU()(f)

    for feature in features[2:]:
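        # Upsample 2x with a transposed convolution, refine with two 3x3 convolutions,
        # and add a 1x1-projected skip connection from the corresponding backbone
        # feature map.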
        f = tf.keras.layers.Conv2DTranspose(filters=128,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.3)(f)

        f = tf.keras.layers.Conv2D(filters=128,
                                   kernel_size=3,
                                   padding='same',
                                   use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.2)(f)

        f = tf.keras.layers.Conv2D(filters=128,
                                   kernel_size=3,
                                   padding='same',
                                   use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.1)(f)

        f_1 = tf.keras.layers.Conv2D(filters=128,
                                     kernel_size=1,
                                     padding='same',
                                     use_bias=False)(feature)
        f_1 = tf.keras.layers.BatchNormalization()(f_1)
        f_1 = tf.keras.layers.ReLU()(f_1)
        f = tf.keras.layers.Dropout(rate=0.3)(f)
        f = tf.keras.layers.Add()([f, f_1])

    # Final transposed convolution with sigmoid activation so the mask pixels lie in [0, 1]
    outputs = tf.keras.layers.Conv2DTranspose(filters=1,
                                              kernel_size=3,
                                              strides=2,
                                              padding='same',
                                              activation="sigmoid")(f)

    # TODO: Create the model and train it
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # model.summary()

    # Piecewise-constant schedule: drop the learning rate tenfold after half
    # and again after three quarters of the epochs
    def lr_schedule(epoch):
        if epoch > args.epochs * 3 / 4:
            lr = 0.00001
        elif epoch > args.epochs / 2:
            lr = 0.0001
        else:
            lr = 0.001
        return lr

    lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule)

    # Compile the model
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=lr_schedule(0)),
        loss=tf.losses.Huber(),
        metrics=[cags.MaskIoUMetric()])

    # Fit the model
    model.fit(train,
              shuffle=True,
              epochs=args.epochs,
              callbacks=[lr_scheduler],
              validation_data=dev,
              verbose=2)

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_segmentation.txt"),
              "w",
              encoding="utf-8") as predictions_file:
        # TODO: Predict the masks on the test set
        test_masks = model.predict(test)

        for mask in test_masks:
            zeros, ones, runs = 0, 0, []
            for pixel in np.reshape(mask >= 0.5, [-1]):
                if pixel:
                    if zeros or (not zeros and not ones):
                        runs.append(zeros)
                        zeros = 0
                    ones += 1
                else:
                    if ones:
                        runs.append(ones)
                        ones = 0
                    zeros += 1
            runs.append(zeros + ones)
            print(*runs, file=predictions_file)