Example #1
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join("logs", "{}-{}-{}".format(
        os.path.basename(globals().get("__file__", "notebook")),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
    ))
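    # (re.sub abbreviates every argument name to the first letter of each
    # underscore-separated part, e.g. batch_size -> bs, epochs -> e)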

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    # Return image features, without the classification layer
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=False)

    def without_mask(x):
        return x["image"], x["label"]

    train = cags.train
    train = train.map(without_mask)

    # Collect the training labels to size the classifier
    labels = [label.numpy() for _, label in train]
    # Output classification size (labels are 0..C-1)
    output_size = max(labels) + 1
    # Length of the training data - 2142 examples
    set_size = len(labels)

    train = train.shuffle(set_size, seed=args.seed)
    train = train.batch(args.batch_size)

    dev = cags.dev
    dev = dev.map(without_mask)
    dev = dev.batch(args.batch_size)

    test = cags.test
    test = test.map(without_mask)
    test = test.batch(args.batch_size)

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    # Add a hidden dense layer followed by the final classification layer
    inter_input = tf.keras.layers.Dense(5000, activation='relu')(efficientnet_b0.outputs[0])
    output = tf.keras.layers.Dense(output_size, activation='softmax')(inter_input)

    # TODO: Create the model and train it
    model = tf.keras.Model(inputs=efficientnet_b0.inputs, outputs=output)
    # model.summary()
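
    # Note: the pretrained backbone stays trainable here, so the whole network is
    # fine-tuned end to end. To train only the newly added dense head instead (as
    # Example #4 below does with its encoder), one could freeze the backbone
    # before compiling:
    #     efficientnet_b0.trainable = False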

    # Define a learning rate decay
    initial_lr = 0.0001
    final_lr = 0.00000001
    lr = tf.keras.optimizers.schedules.PolynomialDecay(initial_lr, args.epochs * set_size / args.batch_size, final_lr)
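    # The decay horizon equals the total number of optimizer steps
    # (epochs * batches per epoch), so the learning rate anneals to final_lr
    # by the end of training.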

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    )

    model.fit(
        train,  # already shuffled above; Keras ignores `shuffle` for tf.data datasets
        epochs=args.epochs,
        validation_data=dev, verbose=2
    )

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_classification.txt"), "w", encoding="utf-8") as predictions_file:
        # TODO: Predict the probabilities on the test set
        test_probabilities = model.predict(test)

        for probs in test_probabilities:
            print(np.argmax(probs), file=predictions_file)
Example #2
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Report only errors by default
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Create logdir name
    args.logdir = os.path.join("logs", "{}-{}-{}".format(
        os.path.basename(globals().get("__file__", "notebook")),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        ",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
    ))

    # Load the data
    cags = CAGS()

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(include_top=False)

    # TODO: Create the model and train it
    model = ...

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    with open(os.path.join(args.logdir, "cags_segmentation.txt"), "w", encoding="utf-8") as out_file:
        # TODO: Predict the masks on the test set
        test_masks = model.predict(...)
        for mask in test_masks:
            zeros, ones, runs = 0, 0, []
            for pixel in np.reshape(mask >= 0.5, [-1]):
                if pixel:
                    if zeros or (not zeros and not ones):
                        runs.append(zeros)
                        zeros = 0
                    ones += 1
                else:
                    if ones:
                        runs.append(ones)
                        ones = 0
                    zeros += 1
            runs.append(zeros + ones)
            print(*runs, file=out_file)
Example #3
    parser.add_argument("--verbose", default=False, action="store_true", help="Verbose TF logging.")
    args = parser.parse_args([] if "__file__" not in globals() else None)

    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Report only errors by default
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Load the gold data
    gold_masks = getattr(CAGS(), args.dataset).map(
        CAGS.parse).map(lambda example: example["mask"])

    # Create the metric
    iou = CAGSMaskIoU()

    # Read the predictions
    with open(args.predictions, "r", encoding="utf-8-sig") as predictions_file:
        for gold_mask in gold_masks:
            predicted_runs = [
                int(run) for run in predictions_file.readline().split()
            ]
            assert sum(predicted_runs) == CAGS.H * CAGS.W

            predicted_mask = np.zeros([CAGS.H * CAGS.W], np.int32)
            offset = 0
            for i, run in enumerate(predicted_runs):
                predicted_mask[offset:offset + run] = i % 2
                offset += run
Example #4
def main(args):
    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # Load the data
    cags = CAGS()

    def with_mask(x):
        return x["image"], x["mask"]

    train = cags.train
    train = train.map(with_mask)
    train = train.shuffle(5000, seed=args.seed)
    train = train.batch(args.batch_size)

    dev = cags.dev
    dev = dev.map(with_mask)
    dev = dev.batch(args.batch_size)

    test = cags.test
    test = test.map(with_mask)
    test = test.batch(args.batch_size)

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    # Load the EfficientNet-B0 model
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False)
    efficientnet_b0.trainable = False
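    # The frozen backbone serves purely as a feature extractor; only the
    # decoder built below is trained.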

    inputs = tf.keras.layers.Input([cags.H, cags.W, cags.C])

    # Use all of the network's outputs except the first one: the first is the
    # final EfficientNet output, while the rest are intermediate feature maps.
    features = efficientnet_b0(inputs)

    # Start the segmentation reconstruction from the coarsest feature map.
    f = features[1]
    f = tf.keras.layers.Conv2D(filters=128,
                               kernel_size=1,
                               padding='same',
                               use_bias=False)(f)
    f = tf.keras.layers.BatchNormalization()(f)
    f = tf.keras.layers.ReLU()(f)
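
    # Decoder: for every remaining (finer) feature map, upsample 2x with a
    # transposed convolution, refine with two 3x3 convolution blocks, and add
    # a 1x1-projected skip connection from the corresponding encoder feature.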

    for feature in features[2:]:
        f = tf.keras.layers.Conv2DTranspose(filters=128,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.3)(f)

        f = tf.keras.layers.Conv2D(filters=128,
                                   kernel_size=3,
                                   padding='same',
                                   use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.2)(f)

        f = tf.keras.layers.Conv2D(filters=128,
                                   kernel_size=3,
                                   padding='same',
                                   use_bias=False)(f)
        f = tf.keras.layers.BatchNormalization()(f)
        f = tf.keras.layers.ReLU()(f)
        f = tf.keras.layers.Dropout(rate=0.1)(f)

        f_1 = tf.keras.layers.Conv2D(filters=128,
                                     kernel_size=1,
                                     padding='same',
                                     use_bias=False)(feature)
        f_1 = tf.keras.layers.BatchNormalization()(f_1)
        f_1 = tf.keras.layers.ReLU()(f_1)
        f = tf.keras.layers.Dropout(rate=0.3)(f)
        f = tf.keras.layers.Add()([f, f_1])

    # Final transposed convolution with sigmoid activation, so predicted pixel values lie in [0, 1]
    outputs = tf.keras.layers.Conv2DTranspose(filters=1,
                                              kernel_size=3,
                                              strides=2,
                                              padding='same',
                                              activation="sigmoid")(f)

    # TODO: Create the model and train it
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # model.summary()

    # Piecewise-constant schedule: drop the learning rate tenfold after half
    # the epochs and again after three quarters
    def lr_schedule(epoch):
        if epoch > args.epochs * 3 / 4:
            lr = 0.00001
        elif epoch > args.epochs / 2:
            lr = 0.0001
        else:
            lr = 0.001
        return lr

    lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule)
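    # The LearningRateScheduler callback sets the optimizer's learning rate at
    # the start of every epoch, so lr_schedule(0) below only supplies the
    # initial value.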

    # Compile the model
    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=lr_schedule(0)),
        loss=tf.losses.Huber(),
        metrics=[cags.MaskIoUMetric()])

    # Fit the model
    model.fit(train,  # already shuffled above; Keras ignores `shuffle` for tf.data datasets
              epochs=args.epochs,
              callbacks=[lr_scheduler],
              validation_data=dev,
              verbose=2)

    # Generate test set annotations, but in args.logdir to allow parallel execution.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, "cags_segmentation.txt"),
              "w",
              encoding="utf-8") as predictions_file:
        # TODO: Predict the masks on the test set
        test_masks = model.predict(test)

        for mask in test_masks:
            zeros, ones, runs = 0, 0, []
            for pixel in np.reshape(mask >= 0.5, [-1]):
                if pixel:
                    if zeros or (not zeros and not ones):
                        runs.append(zeros)
                        zeros = 0
                    ones += 1
                else:
                    if ones:
                        runs.append(ones)
                        ones = 0
                    zeros += 1
            runs.append(zeros + ones)
            print(*runs, file=predictions_file)
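
The loop above run-length encodes each thresholded mask: the mask is flattened and stored as alternating run lengths, always beginning with a run of zeros (possibly of length 0), which is why the decoder in Example #6 below can reconstruct it with i % 2. A minimal self-contained round-trip sketch of the same format (the helper names are illustrative, not part of the assignment):

import numpy as np

def rle_encode(flat_mask):
    # Alternating run lengths; the first run always counts zeros (may be 0).
    runs, current, value = [], 0, 0
    for pixel in flat_mask:
        if pixel != value:
            runs.append(current)
            current, value = 0, pixel
        current += 1
    runs.append(current)
    return runs

def rle_decode(runs, length):
    # Even-indexed runs are zeros, odd-indexed runs are ones (cf. Example #6).
    mask, offset = np.zeros([length], np.int32), 0
    for i, run in enumerate(runs):
        mask[offset:offset + run] = i % 2
        offset += run
    return mask

example = np.array([0, 0, 1, 1, 1, 0], np.int32)
assert rle_encode(example) == [2, 3, 1]
assert np.array_equal(rle_decode([2, 3, 1], 6), example)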
Example #5
    def train(self,
              train_dataset,
              val_dataset,
              learning_rate,
              epochs,
              augment=True,
              momentum=None,
              repeat_augmented=None,
              finetune_efficient_net_from_layer=None):  # 225

        assert self.mode == "training", "You need to have model in training mode."

        self.set_log_files()

        train_dataset_generator = CAGS.create_dataset(
            train_dataset,
            self.config.batch_size,
            shuffle=True,
            augment=augment,
            repeat_augmented=repeat_augmented)
        val_dataset_generator = CAGS.create_dataset(val_dataset,
                                                    self.config.batch_size,
                                                    shuffle=True,
                                                    augment=False)

        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):  # logs/
            os.makedirs(self.log_dir)

        # Callbacks (TODO: decide what quantity to monitor)
        callbacks = [
            tf.keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                           histogram_freq=0,
                                           write_graph=True,
                                           write_images=False),
            tf.keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                               verbose=0,
                                               save_weights_only=True,
                                               save_freq='epoch'),
        ]
        # Train
        self.log("\nStarting at epoch {}. LR={}\n".format(
            self.epoch, learning_rate))
        self.log("Checkpoint Path: {}".format(self.checkpoint_path))

        self.set_efficientnet_trainable(finetune_efficient_net_from_layer)
        self.compile_model(learning_rate, momentum)

        try:
            self.model.fit(
                # train_dataset_generator should yield
                # inputs = [batch_images, batch_gt_class_ids, batch_gt_masks]
                # outputs = []
                train_dataset_generator,
                initial_epoch=self.epoch,
                epochs=epochs,
                # steps_per_epoch=self.config.STEPS_PER_EPOCH,
                callbacks=callbacks,
                validation_data=val_dataset_generator
                # validation_steps=self.config.VALIDATION_STEPS,
                # max_queue_size=100,
            )

        except KeyboardInterrupt:
            print('Interrupted')
            return
Example #6
    parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
    parser.add_argument("--verbose", default=False, action="store_true", help="Verbose TF logging.")
    args = parser.parse_args([] if "__file__" not in globals() else None)

    # Fix random seeds and threads
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    tf.config.threading.set_inter_op_parallelism_threads(args.threads)
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)

    # Report only errors by default
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Load the gold data
    gold_masks = getattr(CAGS(), args.dataset).map(CAGS.parse).map(lambda example: example["mask"])

    # Create the metric
    iou = CAGSMaskIoU()

    # Read the predictions
    with open(args.predictions, "r", encoding="utf-8-sig") as predictions_file:
        for gold_mask in gold_masks:
            predicted_runs = [int(run) for run in predictions_file.readline().split()]
            assert sum(predicted_runs) == CAGS.H * CAGS.W

            predicted_mask = np.zeros([CAGS.H * CAGS.W], np.int32)
            offset = 0
            for i, run in enumerate(predicted_runs):
                predicted_mask[offset:offset + run] = i % 2
                offset += run
Example #7
    tf.config.threading.set_intra_op_parallelism_threads(args.threads)
    # Report only errors by default
    if not args.verbose:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    # Create logdir name
    args.logdir = os.path.join(
        "logs", "{}-{}-{}".format(
            os.path.basename(globals().get("__file__", "notebook")),
            datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"), ",".join(
                ("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                 for key, value in sorted(vars(args).items())))))

    # ###############
    # LOAD THE DATA #
    # ###############
    cags = CAGS()
    train_dataset = CAGS.create_dataset(args.batch_size, augment=True)
    dev_dataset = CAGS.create_dataset(args.batch_size, augment=False)
    test_dataset = CAGS.create_dataset(args.batch_size, augment=False)

    # ################################
    # Load the EfficientNet-B0 model #
    # ################################
    efficientnet_b0 = efficient_net.pretrained_efficientnet_b0(
        include_top=False, dynamic_shape=False)
    efficientnet_b0.trainable = False

    if args.finetune == "top":
        for layer in efficientnet_b0.layers[225:]:  # top conv + 7a last conv
            # print(layer.name)
            layer.trainable = True