Example #1
import multiprocessing

import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms

# Set up the device and the DataLoader keyword arguments.
use_cuda = use_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {"num_workers": multiprocessing.cpu_count(), "pin_memory": True} if use_cuda else {}


# Enable cuDNN (with autotuner benchmarking) and seed the random number generators.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)


# Set up the dataset and the DataLoader.
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = ShapesDataset(transform=transform, csv=train_csv)
test_dataset = ShapesDataset(transform=transform, csv=test_csv)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)


# Instantiate the model and optimizer.
model = BetaVAE().to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)


# Execute model training and testing.
if __name__ == "__main__":
    start_epoch = model.load_last_model(ckpt_dir) + 1
    train_losses, test_losses = utils.read_log(log_dir, ([], []))
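    # The original example is truncated here. A minimal sketch of the epoch loop
    # that typically follows: the train_epoch/test_epoch helpers, the `epochs`
    # count, and utils.write_log are assumptions for illustration, not part of
    # the original code.
    for epoch in range(start_epoch, start_epoch + epochs):
        train_loss = train_epoch(model, device, train_loader, optimizer)  # hypothetical helper
        test_loss = test_epoch(model, device, test_loader)                # hypothetical helper
        train_losses.append((epoch, train_loss))
        test_losses.append((epoch, test_loss))
        utils.write_log(log_dir, (train_losses, test_losses))  # assumed counterpart to utils.read_log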
    
Example #2
    np.random.seed(10101)
    np.random.shuffle(imglist)
    train_imglist = imglist[:int(count * 0.9)]
    val_imglist = imglist[int(count * 0.9):]

    MODEL_DIR = "logs"

    COCO_MODEL_PATH = "model_data/mask_rcnn_coco.h5"
    config = ShapesConfig()
    # Use the training and validation set sizes as the step counts.
    config.STEPS_PER_EPOCH = len(train_imglist)
    config.VALIDATION_STEPS = len(val_imglist)
    config.display()

    # Prepare the training dataset.
    dataset_train = ShapesDataset()
    dataset_train.load_shapes(len(train_imglist), img_floder, mask_floder,
                              train_imglist, yaml_floder)
    dataset_train.prepare()

    # Prepare the validation dataset.
    dataset_val = ShapesDataset()
    dataset_val.load_shapes(len(val_imglist), img_floder, mask_floder,
                            val_imglist, yaml_floder)
    dataset_val.prepare()

    # Build the training model.
    model = get_train_model(config)
    model.summary()
    model.load_weights(COCO_MODEL_PATH, by_name=True, skip_mismatch=True)
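    # The example stops after loading the COCO weights. A hedged sketch of the
    # training call that would typically follow, assuming the surrounding project
    # provides a matterport-style data_generator helper (its name and signature
    # are assumptions, not the original API).
    train_gen = data_generator(dataset_train, config, shuffle=True, batch_size=config.BATCH_SIZE)
    val_gen = data_generator(dataset_val, config, shuffle=True, batch_size=config.BATCH_SIZE)
    model.fit_generator(
        train_gen,
        steps_per_epoch=config.STEPS_PER_EPOCH,
        epochs=100,  # arbitrary epoch count for the sketch
        validation_data=val_gen,
        validation_steps=config.VALIDATION_STEPS,
    )
    model.save_weights(MODEL_DIR + "/mask_rcnn_shapes.h5")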
Example #3
import os


def run(is_distributed, logs_dir):
    from dataset import ShapesDataset
    from mrcnn.config import Config

    ######################
    class ShapesConfig(Config):
        NAME = "shapes"
        GPU_COUNT = 2
        IMAGES_PER_GPU = 2
        NUM_CLASSES = 1 + 3
        IMAGE_MIN_DIM = 128
        IMAGE_MAX_DIM = 128
        RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
        TRAIN_ROIS_PER_IMAGE = 32
        STEPS_PER_EPOCH = 10
        VALIDATION_STEPS = 5

    config = ShapesConfig()
    config.display()

    # Training dataset
    dataset_train = ShapesDataset()
    dataset_train.load_shapes(500000, config.IMAGE_SHAPE[0],
                              config.IMAGE_SHAPE[1])
    dataset_train.prepare()

    # Validation dataset
    dataset_val = ShapesDataset()
    dataset_val.load_shapes(5000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_val.prepare()
    if is_distributed:
        import mrcnn.distributed_model as modellib
    else:
        import mrcnn.model as modellib

    from mrcnn import utils

    # Local path to trained weights file
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

    # Download COCO trained weights from Releases if needed
    if not os.path.exists(COCO_MODEL_PATH):
        utils.download_trained_weights(COCO_MODEL_PATH)

    # Print the devices TensorFlow can see.
    from tensorflow.python.client import device_lib
    print(device_lib.list_local_devices())

    # Create model in training mode
    model = modellib.MaskRCNN("training", config, logs_dir)

    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See the README at https://github.com/matterport/Mask_RCNN for instructions to download the COCO weights.
    model.load_weights(COCO_MODEL_PATH,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=1000,
                layers='heads')
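
    # Once training finishes, the matterport API can reload the weights in
    # inference mode and run detection. A minimal sketch; the InferenceConfig
    # subclass and the `image` array are assumptions for illustration.
    class InferenceConfig(ShapesConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    inference_model = modellib.MaskRCNN("inference", InferenceConfig(), logs_dir)
    # find_last() returns the most recent checkpoint path in recent Mask_RCNN versions.
    inference_model.load_weights(inference_model.find_last(), by_name=True)
    results = inference_model.detect([image], verbose=0)  # `image` is a hypothetical HxWx3 numpy array
    r = results[0]  # dict with "rois", "class_ids", "scores", "masks"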