Code Example #1
def main(args):
    set_gpu_growth()
    dataset = VocDataset(cfg.voc_path, class_mapping=cfg.class_mapping)
    dataset.prepare()
    train_img_info = [info for info in dataset.get_image_info_list() if info.type == 'trainval']  # training set
    print("train_img_info:{}".format(len(train_img_info)))
    test_img_info = [info for info in dataset.get_image_info_list() if info.type == 'test']  # test set
    print("test_img_info:{}".format(len(test_img_info)))

    m = ssd_model(cfg.feature_fn, cfg.cls_head_fn, cfg.rgr_head_fn, cfg.input_shape,
                  cfg.num_classes, cfg.specs, cfg.max_gt_num,
                  cfg.positive_iou_threshold, cfg.negative_iou_threshold,
                  cfg.negatives_per_positive, cfg.min_negatives_per_image)

    # Load pretrained weights: resume from a checkpoint if init_epoch > 0
    init_epoch = args.init_epoch
    if args.init_epoch > 0:
        text = '{}-{}-{}'.format(cfg.base_model_name, args.batch_size, args.lr)
        m.load_weights('/tmp/ssd-{}.{:03d}.h5'.format(text, init_epoch), by_name=True)
    else:
        m.load_weights(cfg.pretrained_weight_path, by_name=True)
    # Training data generator
    transforms = TrainAugmentation(cfg.image_size, cfg.mean_pixel, cfg.std)
    train_gen = Generator(train_img_info,
                          transforms,
                          cfg.input_shape,
                          args.batch_size,
                          cfg.max_gt_num)
    # Validation generator (note: the source reuses the training augmentation pipeline)
    val_trans = TrainAugmentation(cfg.image_size, cfg.mean_pixel, cfg.std)
    val_gen = Generator(test_img_info,
                        val_trans,
                        cfg.input_shape,
                        args.batch_size,
                        cfg.max_gt_num)
    optimizer = optimizers.SGD(
        lr=args.lr, momentum=args.momentum,
        clipnorm=args.clipnorm)
    # The classification and box-regression losses are computed as layers
    # inside the model graph, so the compiled losses just pass y_pred through.
    m.compile(optimizer=optimizer,
              loss={"class_loss": lambda y_true, y_pred: y_pred,
                    "bbox_loss": lambda y_true, y_pred: y_pred})

    m.summary()

    # Train
    m.fit_generator(train_gen,
                    epochs=args.epochs,
                    verbose=1,
                    initial_epoch=init_epoch,
                    validation_data=val_gen,
                    use_multiprocessing=False,
                    workers=10,
                    callbacks=get_call_back(args.lr, args.batch_size))
Code Example #2
def main():
    # dataset path
    input_images_path = './Face Dataset/input/'
    target_images_path = './Face Dataset/target/'
    [src_images, tar_images] = load_images(input_images_path,
                                           target_images_path)
    print('Loaded: ', src_images.shape, tar_images.shape)
    # save as compressed numpy array
    filename = './loaded_data/data.npz'
    savez_compressed(filename, src_images, tar_images)
    print('Saved dataset: ', filename)

    dataset = load_real_samples('./loaded_data/data.npz')
    print('Loaded', dataset[0].shape, dataset[1].shape)

    # image_shape = (HEIGHT,WIDTH,DEPTH)
    # define input shape based on the loaded dataset
    # image_shape = src_images[0].shape[1:]
    # define the models
    d_model = Discriminator().define_discriminator()
    g_model = Generator().define_generator()
    # define the composite model
    gan_model = GAN().define_gan(g_model, d_model)
    # train model
    train(d_model, g_model, gan_model, dataset)
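
The composite model wiring happens inside GAN.define_gan, which the excerpt does not show. As a rough sketch, a typical pix2pix-style composite freezes the discriminator and chains it behind the generator; the image shape, optimizer settings, and 1:100 loss weighting below are assumptions, not the project's confirmed values:

from keras.models import Model
from keras.layers import Input
from keras.optimizers import Adam

def define_gan(g_model, d_model, image_shape=(256, 256, 3)):
    # Freeze the discriminator so only the generator trains through the composite
    d_model.trainable = False
    in_src = Input(shape=image_shape)      # source image
    gen_out = g_model(in_src)              # translated image
    dis_out = d_model([in_src, gen_out])   # discriminator judges the (source, generated) pair
    model = Model(in_src, [dis_out, gen_out])
    # Adversarial loss plus L1 reconstruction loss, weighted 1:100
    model.compile(loss=['binary_crossentropy', 'mae'],
                  optimizer=Adam(lr=0.0002, beta_1=0.5),
                  loss_weights=[1, 100])
    return model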
Code Example #3
    def get_generator(self, type):
        return Generator(
            input_folder=self.folders[type]['LR'],
            label_folder=self.folders[type]['HR'],
            batch_size=self.batch_size,
            patch_size=self.patch_size,
            scale=self.scale,
            n_validation_samples=self.n_validation_samples,
            mode=type,
        )
Code Example #4
    def data_init(self):

        print("\nData init")
        self.dataset = Dataset()
        generator = Generator(self.config, self.dataset)
        self.train_generator = generator.generate()
        self.X_val, self.y_val = self.dataset.convert_to_arrays(
            self.config,
            self.dataset.get_partition(self.config)['val'])
        self.X_test, self.y_test = self.dataset.convert_to_arrays(
            self.config,
            self.dataset.get_partition(self.config)['test'])
Code Example #5
    def get_generator(self, generator_type, dataset_type):
        """Creates a generator of the specified type, with the specified dataset type."""
        self.create_random_dataset(dataset_type)

        generator = Generator(
            input_folder=self.dataset_folder[dataset_type]['LR'],
            label_folder=self.dataset_folder[dataset_type]['HR'],
            patch_size=self.patch_size['LR'],
            batch_size=self.batch_size,
            mode=generator_type,
            scale=self.scale,
        )

        return generator
Code Example #6
File: models.py  Project: yf817/miccai-cpm-ensemble
    def data_init(self):

        print("\nData init")
        #self.dataset = TCGA_Dataset(self.config)
        self.dataset = Dataset(self.config)

        generator = Generator(self.config, self.dataset)
        self.train_generator = generator.generate()

        self.X_val, self.y_val = self.dataset.convert_to_arrays(
            self.dataset._partition[0]['val'],
            self.dataset._partition[1]['val'],
            phase='val',
            size=self.config.sampling_size_val)

        self.X_test, self.y_test = self.dataset.convert_to_arrays(
            self.dataset._partition[0]['test'],
            self.dataset._partition[1]['test'],
            phase='test',
            size=self.config.sampling_size_test)

        self.y_test = self.patch_to_image(self.y_test, proba=False)
Code Example #7
        def union(count, union_times):
            uf = QuickFind(count)

            pairs = Generator.random_list(2, count, union_times)
            for p in pairs:
                uf.union(p[0], p[1])
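
The QuickFind class used here is not part of the excerpt. A minimal sketch of the assumed interface (constructor takes the element count, union merges two components by relabeling) is below; find is O(1) and union is O(n):

class QuickFind:
    def __init__(self, count: int):
        # Each element starts in its own component
        self.ids = list(range(count))

    def find(self, p: int) -> int:
        return self.ids[p]

    def union(self, p: int, q: int) -> None:
        p_id, q_id = self.ids[p], self.ids[q]
        if p_id == q_id:
            return
        # Relabel every element of p's component with q's component id
        self.ids = [q_id if i == p_id else i for i in self.ids]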
Code Example #8
File: main.py  Project: msalcedo19/TransCity
def main():
    gen = Generator()
    GUI(gen)
Code Example #9
'''
    1. We initialize the validators
'''
student_validator = StudentValidator()
assignment_validator = AssignmentValidator()
grade_validator = GradeValidator()
'''
    2. We initialize the repositories
        - Repository holds the business entities
        - Acts like the program's database
        - Might include some data validation
        - Does not know about any other components
'''
student_repository = StudentRepository()
assignment_repository = AssignmentRepository()
grade_repository = GradeRepository()
'''
    3. Call the generator methods for each of the student, assignment and grade repositories
'''

generator = Generator(student_repository, assignment_repository,
                      grade_repository)

generator.generate_students(100)
generator.generate_assignments(100)
generator.generate_grades(100)
'''
    4. We initialize the controllers
        - The controller implements the program's 'operations'
        - Knows only about the repository layer
        - 'Talks' to the repository and UI using parameter passing and exceptions
'''
student_controller = StudentController(student_validator, student_repository)
assignment_controller = AssignmentController(assignment_validator,
                                             assignment_repository)
grade_controller = GradeController(grade_validator, grade_repository,
                                   student_repository, assignment_repository)
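
To make the layering described in the comments concrete, here is a minimal sketch of one repository/controller pair; the method names and bodies are hypothetical, only the responsibilities come from the comments above:

class StudentRepository:
    """Holds the business entities; acts like the program's database."""

    def __init__(self):
        self._students = {}

    def add(self, student_id, name):
        # Basic data validation at the repository boundary
        if student_id in self._students:
            raise ValueError("duplicate student id")
        self._students[student_id] = name


class StudentController:
    """Implements the program's operations; knows only the repository layer."""

    def __init__(self, repository):
        self._repository = repository

    def add_student(self, student_id, name):
        # Talks to the repository via parameter passing and exceptions
        self._repository.add(student_id, name)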
Code Example #10
from utils.generator import Generator


class Solution:
    def removeElement(self, nums, val: int) -> int:
        # Two pointers: nums[:left_index] holds kept elements,
        # nums[right_index:] holds elements swapped out for removal.
        left_index = 0
        right_index = len(nums)

        while left_index < right_index:
            if nums[left_index] == val:
                # Shrink the window, swap the match to the tail, and
                # re-examine the element that was swapped in.
                right_index -= 1
                nums[left_index], nums[right_index] = \
                    nums[right_index], nums[left_index]
                left_index -= 1
            left_index += 1
        return left_index


if __name__ == '__main__':
    generator = Generator(
        size=5,
        low=0,
        high=10,
    )
    # array = generator.random_array()
    array = [0, 1, 2, 2, 3, 0, 4, 2]

    result = Solution().removeElement(nums=array, val=2)
    print(array, result)
Code Example #11
import numpy as np

from sgd import SGD
from momentum import Momentum
from adam import Adam  # assumed import: Adam is instantiated below

# Global hyperparameters
max_epochs = 2000
batch_size = 60

# Load and prepare data
date, latitude, longitude, magnitude = Dataset.load_from_file("database.csv")
data_size = len(date)
vectorsX = Dataset.vectorize(date, latitude, longitude)
vectorsY = magnitude.reshape((data_size, 1))

# Get Batcher
batch_gen = Generator.gen_random_batch(batch_size, vectorsX, vectorsY)

# randomly initialize our weights with mean 0
syn0 = 2 * np.random.standard_normal((vectorsX.shape[1], 32)) / 10
syn1 = 2 * np.random.standard_normal((32, vectorsY.shape[1])) / 10

# Init trainer table and datalog
trainers = [SGD(syn0, syn1), Momentum(syn0, syn1), Adam(syn0, syn1)]
datalog = []

# Train model
x = np.arange(1, max_epochs)
for t in x:
    # Get Batch
    batch = next(batch_gen)
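
The loop calls next(batch_gen) once per iteration, which implies Generator.gen_random_batch is an infinite generator. A minimal sketch of that assumed behavior (the real implementation is not shown):

import numpy as np

def gen_random_batch(batch_size, X, Y):
    n = X.shape[0]
    while True:  # infinite: yields one random mini-batch per next() call
        idx = np.random.randint(0, n, size=batch_size)
        yield X[idx], Y[idx]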
Code Example #12
def main():
    """
    main
    """
    config = get_config()

    if torch.cuda.is_available() and config.gpu >= 0:
        device = torch.device(config.gpu)
    else:
        device = torch.device('cpu')

    # Data definition
    tokenizer = lambda x: x.split()

    src_field = Field(
        sequential=True,
        tokenize=tokenizer,
        lower=True,
        batch_first=True,
        include_lengths=True
    )
    tgt_field = Field(
        sequential=True,
        tokenize=tokenizer,
        lower=True,
        batch_first=True,
        init_token=BOS_TOKEN,
        eos_token=EOS_TOKEN,
        include_lengths=True
    )

    fields = {
        'src': ('src', src_field),
        'tgt': ('tgt', tgt_field),
    }

    test_data = TabularDataset(
        path=config.data_path,
        format='json',
        fields=fields
    )

    with open(os.path.join(config.vocab_dir, 'src.vocab.pkl'), 'rb') as src_vocab:
        src_field.vocab = pickle.load(src_vocab)
    with open(os.path.join(config.vocab_dir, 'tgt.vocab.pkl'), 'rb') as tgt_vocab:
        tgt_field.vocab = pickle.load(tgt_vocab)

    test_iter = BucketIterator(
        test_data,
        batch_size=config.batch_size,
        device=device,
        shuffle=False
    )

    # Model definition
    src_embedding = nn.Embedding(len(src_field.vocab), config.embedding_size)
    tgt_embedding = nn.Embedding(len(tgt_field.vocab), config.embedding_size)
    assert config.model in ['rnn', 'transformer']
    if config.model == 'rnn':
        model = Seq2Seq(
            src_embedding=src_embedding,
            tgt_embedding=tgt_embedding,
            embedding_size=config.embedding_size,
            hidden_size=config.hidden_size,
            vocab_size=len(tgt_field.vocab),
            start_index=tgt_field.vocab.stoi[BOS_TOKEN],
            end_index=tgt_field.vocab.stoi[EOS_TOKEN],
            padding_index=tgt_field.vocab.stoi[PAD_TOKEN],
            bidirectional=config.bidirectional,
            num_layers=config.num_layers,
            dropout=config.dropout
        )
    elif config.model == 'transformer':
        model = Transformer(
            src_embedding=src_embedding,
            tgt_embedding=tgt_embedding,
            embedding_size=config.embedding_size,
            hidden_size=config.hidden_size,
            vocab_size=len(tgt_field.vocab),
            start_index=tgt_field.vocab.stoi[BOS_TOKEN],
            end_index=tgt_field.vocab.stoi[EOS_TOKEN],
            padding_index=tgt_field.vocab.stoi[PAD_TOKEN],
            num_heads=config.num_heads,
            num_layers=config.num_layers,
            dropout=config.dropout,
            learning_position_embedding=config.learning_position_embedding,
            embedding_scale=config.embedding_scale,
            num_positions=config.num_positions
        )

    model.load(filename=config.ckpt)
    model.to(device)

    # Save directory
    if not os.path.exists(config.save_dir):
        os.makedirs(config.save_dir)
    # Logger definition
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    fh = logging.FileHandler(os.path.join(config.save_dir, "test.log"))
    logger.addHandler(fh)

    # Generator definition
    if config.per_node_beam_size is None:
        config.per_node_beam_size = config.beam_size
    generator = Generator(
        model=model,
        data_iter=test_iter,
        src_vocab=src_field.vocab,
        tgt_vocab=tgt_field.vocab,
        logger=logger,
        beam_size=config.beam_size,
        per_node_beam_size=config.per_node_beam_size,
        result_path=os.path.join(config.save_dir, "result.txt")
    )

    # Save config
    params_file = os.path.join(config.save_dir, "params.json")
    with open(params_file, 'w') as fp:
        json.dump(config.__dict__, fp, indent=4, sort_keys=True)
    print("Saved params to '{}'".format(params_file))
    logger.info(model)

    generator.generate()
    logger.info("Testing done!")
Code Example #13
File: main.py  Project: plcedoz/DICOM_MRI
from utils.generator import Generator
from utils.plot import plot_generator
from utils.plot import plot_histogram
from utils.plot import plot_segmentation
from segmentation import get_i_contour

n_samples = 50

if __name__ == "__main__":
    #Part 1: Parse the o-contours
    print("\nPart 1 :\n")

    #Extract the data: images and segmentation (inner contours and outer contours)
    config = Config()
    image_files, labels = get_data()
    generator = Generator(config).generate(image_files, labels)
    #Plot some samples
    plot_generator(generator, n_samples)

    #Part 2: Heuristic LV segmentation
    print("\nPart 2 :\n")

    #Extract samples from the generator
    images, (i_masks, o_masks) = next(generator)

    #Q2.1.: Intensity-based prediction
    print("Intensity-based prediction")
    for image, o_mask, i_mask in zip(images[0:n_samples], o_masks[0:n_samples],
                                     i_masks[0:n_samples]):
        #Extract the contour using the intensity-based method
        i_contour = get_i_contour(image,
Code Example #14
File: DRAM.py  Project: alexmomeni/HistoDram
    def data_init(self):
        print("\nData init")
        self.dataset = Dataset(self.config)
        self.generator = Generator(self.config, self.dataset)
Code Example #15
from utils.generator import Generator # generates cola logos on image
from utils.converter import Converter # converts VOC data to ssd train data
from utils.check_imgs_generator import Check_imgs_generator # generates check images

colas_path = 'colas'
g = Generator(colas_path, [('cola_black.png', 0.05), ('cola_red.png', 0.35), ('cola_white.png', 0.6)],
              (50, 50),
              (20, 40),
              90,
              (2, 3),
              0.6)

c = Converter(300, g)
src_path = '/media/renat/hdd1/workdir/datasets/images/VOCdevkit/VOC2012/JPEGImages'
dst_path = '/media/renat/hdd1/workdir/docker/ssd-af/data/VOCdevkit/cola'
c.perform_conversion(src_path, dst_path, 1e10)  # warning! parts are hardcoded

check_imgs_generator = Check_imgs_generator()
check_imgs_generator.generate_check_imgs(dst_path, ['trainval', 'test'])  # warning! parts are hardcoded
Code Example #16
    def __init__(self, executor: Executor):
        self.generator = Generator(executor)