Example #1
    def __init__(
        self,
        infer_model,
        encoder_name='resnet34',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True,
        decoder_channels=(256, 128, 64, 32, 16),
        classes=1,
        activation=None,
        center=None,  # useful for VGG models
        attention_type=None,
        tta=False,
        adapt_input=1,
        preprocess=True,
        classification=False,
    ):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)
        decoder = UnetDecoder(
            encoder_channels=encoder.out_shapes,
            decoder_channels=decoder_channels,
            final_channels=classes,
            use_batchnorm=decoder_use_batchnorm,
            center=center,
            attention_type=attention_type,
            classification=classification,
        )

        super().__init__(infer_model, encoder, decoder, activation, tta,
                         adapt_input, preprocess)

        self.name = 'ref-u-{}'.format(encoder_name)
Example #2
    def __init__(
            self,
            encoder_name='resnet34',
            encoder_weights='imagenet',
            input_shape=(3, 384),
            classes=1,
            activation='sigmoid',
            features=256,
            tta=False
    ):
        encoder = get_encoder(
            encoder_name,
            encoder_weights=encoder_weights
        )

        decoder = RefineNetPoolingImproveDecoder(
            input_shape,
            encoder_channels=encoder.out_shapes,
            num_classes=classes,
            features=features
        )

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'refinenet-pooling-improve-{}'.format(encoder_name)
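The constructor above only shows initialization; here is a minimal usage sketch, assuming it belongs to a torch.nn.Module subclass (the class name RefineNetPoolingImprove and the 384x384 input, matching input_shape=(3, 384), are assumptions):

import torch

# Hypothetical class name; only the constructor is shown above.
model = RefineNetPoolingImprove(encoder_name='resnet34',
                                classes=1,
                                activation='sigmoid')
model.eval()
with torch.no_grad():
    x = torch.randn(2, 3, 384, 384)  # N x C x H x W
    masks = model(x)                 # expected shape (2, 1, 384, 384), values in [0, 1]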
Example #3
    def __init__(self,
                 encoder_name='resnet34',
                 encoder_weights='imagenet',
                 decoder_pyramid_channels=256,
                 decoder_segmentation_channels=128,
                 classes=1,
                 dropout=0.2,
                 activation='sigmoid',
                 final_upsampling=4,
                 decoder_merge_policy='add',
                 tta=False):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)

        decoder = FPNDecoder(
            encoder_channels=encoder.out_shapes,
            pyramid_channels=decoder_pyramid_channels,
            segmentation_channels=decoder_segmentation_channels,
            final_channels=classes,
            dropout=dropout,
            final_upsampling=final_upsampling,
            merge_policy=decoder_merge_policy)

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'fpn-{}'.format(encoder_name)
Example #4
    def __init__(
            self,
            encoder_name='resnet34',
            encoder_weights='imagenet',
            decoder_use_batchnorm=True,
            decoder_channels=(256, 128, 64, 32, 16),
            classes=1,
            activation='sigmoid',
            center=False,  # useful for VGG models
    ):
        encoder = get_encoder(
            encoder_name,
            encoder_weights=encoder_weights
        )

        decoder = UnetDecoder(
            encoder_channels=encoder.out_shapes,
            decoder_channels=decoder_channels,
            final_channels=classes,
            use_batchnorm=decoder_use_batchnorm,
            center=center,
        )

        super().__init__(encoder, decoder, activation)

        self.name = 'u-{}'.format(encoder_name)
Example #5
    def __init__(self,
                 encoder_name='resnet34',
                 encoder_weights='imagenet',
                 psp_in_factor=8,
                 psp_out_channels=512,
                 psp_use_batchnorm=True,
                 psp_aux_output=False,
                 classes=21,
                 dropout=0.2,
                 activation='softmax',
                 tta=False):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)

        decoder = PSPDecoder(
            encoder_channels=encoder.out_shapes,
            downsample_factor=psp_in_factor,
            psp_out_channels=psp_out_channels,
            final_channels=classes,
            dropout=dropout,
            aux_output=psp_aux_output,
            use_batchnorm=psp_use_batchnorm,
        )

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'psp-{}'.format(encoder_name)
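As with the other wrappers, usage is a plain forward pass; a sketch assuming the constructor belongs to a class named PSPNet (hypothetical) and a 21-class setup such as Pascal VOC:

import torch

model = PSPNet(encoder_name='resnet34', classes=21, activation='softmax')  # class name assumed
model.eval()
with torch.no_grad():
    probs = model(torch.randn(1, 3, 384, 384))  # (1, 21, H', W'), softmax over channel dim
    labels = probs.argmax(dim=1)                # per-pixel class indices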
Example #6
def embed_sentences(sentences, args):
    """Create sentence embeddings from raw sentences.

    Save the output to disk, either as ``nmslib.index`` or ``np.array``.

    Args:
        sentences (np.array): input sentences.
        args (argparse.Namespace): command line arguments.
    """
    encoder = get_encoder(args.encoder)
    start = time.time()
    embeddings = encoder.encode(sentences)
    encoding_time = time.time() - start
    embeddings_dim = embeddings[0].shape[0]
    print(f'-I- Done in {encoding_time} seconds.')

    if args.indices:
        embeddings_ids = np.loadtxt(args.indices, dtype=int, delimiter='\n')
    else:
        embeddings_ids = None

    # remove principal components
    if args.encoder == 'simple_encoder' and args.remove_components > 0 and encoder.components_ is None:
        components_ = svd_components(embeddings,
                                     n_components=args.remove_components)
        embeddings -= embeddings.dot(components_.transpose()).dot(components_)

        components_fname = os.path.join(
            args.output_dir,
            f'{args.encoder.replace("_", "-")}-{embeddings_dim}d-components.npy'
        )
        print(f'-I- Saving PCA components to {components_fname}')
        np.save(components_fname, components_)

    # persist to disk (either .npy / nmslib / annoy)
    if args.index:
        target = os.path.join(
            args.output_dir,
            f'{args.encoder.replace("_", "-")}-{args.index}-{embeddings_dim}d.bin'
        )

        index = NMSLibCorpusIndex(dim=embeddings_dim)

        print(f'-I- Building {args.index} ANN Index: {target}')
        start = time.time()
        index.add_dense(embeddings, ids=embeddings_ids)
        index.save(target)
        indexing_time = time.time() - start
        print(f'-I- Done in {int(indexing_time / 60)} minutes.')
    else:
        target = os.path.join(
            args.output_dir,
            f'{args.encoder.replace("_", "-")}-{embeddings_dim}d.npy')
        print(f'-I- Saving embeddings to file: {target}')
        np.save(target, embeddings)
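A sketch of how embed_sentences might be invoked; the attribute names come from the function body above, the values are illustrative:

import argparse
import os
import numpy as np

args = argparse.Namespace(
    encoder='simple_encoder',
    indices=None,            # optional file with one integer id per line
    remove_components=1,     # strip the first principal component
    index=None,              # None -> save plain .npy instead of an ANN index
    output_dir='embeddings',
)
os.makedirs(args.output_dir, exist_ok=True)  # np.save needs the directory to exist
sentences = np.array(['first sentence', 'second sentence'])
embed_sentences(sentences, args)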
Example #7
    def __init__(
        self,
        encoder_name: str = "resnet34",
        encoder_depth: int = 5,
        encoder_weights: Optional[str] = "imagenet",
        decoder_pyramid_channels: int = 256,
        decoder_segmentation_channels: int = 128,
        decoder_merge_policy: str = "add",
        decoder_dropout: float = 0.2,
        in_channels: int = 3,
        classes: int = 1,
        activation: Optional[str] = None,
        upsampling: int = 4,
        aux_params: Optional[dict] = None,
    ):
        super().__init__()

        self.encoder = get_encoder(
            encoder_name,
            in_channels=in_channels,
            depth=encoder_depth,
            weights=encoder_weights,
        )

        self.decoder = FPNDecoder(
            encoder_channels=self.encoder.out_channels,
            encoder_depth=encoder_depth,
            pyramid_channels=decoder_pyramid_channels,
            segmentation_channels=decoder_segmentation_channels,
            dropout=decoder_dropout,
            merge_policy=decoder_merge_policy,
        )

        self.segmentation_head = SegmentationHead(
            in_channels=self.decoder.out_channels,
            out_channels=classes,
            activation=activation,
            kernel_size=1,
            upsampling=upsampling,
        )

        if aux_params is not None:
            self.classification_head = ClassificationHead(
                in_channels=self.encoder.out_channels[-1], **aux_params
            )
        else:
            self.classification_head = None

        self.name = "fpn-{}".format(encoder_name)
        self.initialize()
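This constructor follows the segmentation_models.pytorch-style API; a usage sketch (the class name FPN is assumed, and input sides should be divisible by 32 for the ResNet encoder):

import torch

model = FPN(encoder_name='resnet34', in_channels=3, classes=1)  # class name assumed
model.eval()
with torch.no_grad():
    mask = model(torch.randn(1, 3, 256, 256))  # (1, 1, 256, 256)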
Example #8
    def __init__(self,
                 encoder_name='resnet34',
                 encoder_weights='imagenet',
                 decoder_use_batchnorm=True,
                 classes=1,
                 activation='sigmoid',
                 tta=False):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)

        decoder = LinknetDecoder(
            encoder_channels=encoder.out_shapes,
            prefinal_channels=32,
            final_channels=classes,
            use_batchnorm=decoder_use_batchnorm,
        )

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'link-{}'.format(encoder_name)
Example #9
File: app.py Project: sarahJune1/covid19
def create_app(env=None):
    """Create falcon WSGI application.

    Args:
        env (dict-like): environment variables.

    Returns:
        falcon.API
    """
    # By default we use the system environment.
    env = env or os.environ

    # Init encoder
    encoder_name = env.get('ENCODER', 'simple_encoder')
    print(f"Init  Simple Encoder - {encoder_name}")
    sentence_encoder = get_encoder(encoder_name)

    # Init corpus index
    corpus_index_fname = env.get(
        'CORPUS_INDEX', os.path.join(data_dir,
                                     'simple-encoder-nmslib-100d.bin'))
    print(f"Init Corpus Index - {corpus_index_fname}")
    corpus_index = load_corpus_index(corpus_index_fname)

    # Init DB
    print('Init Covid DB')
    db_session = get_session(conn=os.getenv(
        'DB_CONNECTION', os.path.join(data_dir, 'covid19.sqlite')))

    # Create WSGI application
    app = falcon.API()  # pylint: disable=invalid-name

    # Init routes
    app.add_route(
        '/similar',
        CovidSimilarityResource(corpus_index, sentence_encoder, db_session))

    print('*** WSGI application is ready ***')
    return app
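A minimal way to serve the returned app locally (a sketch; gunicorn is the usual production choice, wsgiref suffices for a smoke test):

from wsgiref.simple_server import make_server

app = create_app()
with make_server('', 8000, app) as httpd:
    print('Serving on http://localhost:8000/similar')
    httpd.serve_forever()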
Example #10
    def __init__(
        self,
        encoder_name='resnet34',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True,
        classes=1,
        activation='sigmoid',
        mid_channel=128,
        tta=False,
    ):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)

        decoder = FastFCNDecoder(
            encoder_channels=encoder.out_shapes,
            final_channels=classes,
            use_batchnorm=decoder_use_batchnorm,
            mid_channel=mid_channel,
        )

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'fastfcn-{}'.format(encoder_name)
Example #11
File: model.py Project: csinva/local-vae
def init_specific_model(model_type, img_size, latent_dim, hidden_dim=None):
    """
    Return an instance of a VAE with encoder and decoder from `model_type`.
    Parameters
    ----------
    img_size : tuple of ints for model_type=Burgess, int for model_type=Lin
        Size or Dimension of images 
    """
    model_type = model_type.lower().capitalize()
    get_enc = get_encoder(model_type)
    get_dec = get_decoder(model_type)
    if model_type == "Burgess":
        encoder = get_enc(img_size, latent_dim)
        decoder = get_dec(img_size, latent_dim)
    elif model_type == "Lin":
        encoder = get_enc(img_size, latent_dim, hidden_dim)
        decoder = get_dec(img_size, latent_dim, hidden_dim)
    else:
        err = "Unkown model_type={}. Possible values: {}"
        raise ValueError(err.format(model_type, MODELS))

    model = VAE(encoder, decoder)
    model.model_type = model_type  # store to help reloading
    return model
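An illustrative call (the values are assumptions; 'burgess' is normalized to 'Burgess' by .lower().capitalize() inside the function):

model = init_specific_model('burgess', img_size=(1, 32, 32), latent_dim=10)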
Example #12
    def __init__(
        self,
        encoder_name='resnet34',
        encoder_weights='imagenet',
        decoder_use_batchnorm=True,
        decoder_channels=(256, 128, 64, 32, 16),
        classes=1,
        activation='sigmoid',
        center=None,  # useful for VGG models
        attention_type=None,
        tta=False,
    ):
        encoder = get_encoder(encoder_name, encoder_weights=encoder_weights)

        decoder = HyperColumnsDecoder(encoder_channels=encoder.out_shapes,
                                      decoder_channels=decoder_channels,
                                      final_channels=classes,
                                      use_batchnorm=decoder_use_batchnorm,
                                      center=center,
                                      attention_type=attention_type)

        super().__init__(encoder, decoder, activation, tta)

        self.name = 'HyperColumns-{}'.format(encoder_name)
Example #13
    def __init__(
            self,
            encoder_type,
            shape_decoder_type,
            texture_decoder_type,
            discriminator_type,
            vertex_scaling,
            texture_scaling,
            silhouette_loss_levels,
            lambda_silhouettes,
            lambda_textures,
            lambda_perceptual,
            lambda_inflation,
            lambda_discriminator,
            lambda_discriminator2,
            lambda_graph_laplacian,
            single_view_training,
            class_conditional,
            no_texture,
            num_views,
            dim_hidden=512,
            anti_aliasing=False,
    ):
        super(ShapeNetModel, self).__init__()
        self.trainer = None
        self.dataset_name = 'shapenet'

        # model size
        self.dim_hidden = dim_hidden

        # loss weights
        self.silhouette_loss_levels = silhouette_loss_levels
        self.lambda_silhouettes = lambda_silhouettes
        self.lambda_textures = lambda_textures
        self.lambda_perceptual = lambda_perceptual
        self.lambda_discriminator = lambda_discriminator
        self.lambda_discriminator2 = lambda_discriminator2
        self.lambda_inflation = lambda_inflation
        self.lambda_graph_laplacian = lambda_graph_laplacian

        # others
        self.single_view_training = single_view_training
        self.class_conditional = class_conditional
        self.no_texture = no_texture
        self.num_views = num_views
        self.use_depth = False

        # setup renderer
        self.renderer = neural_renderer.Renderer()
        self.renderer.image_size = 224
        self.renderer.anti_aliasing = anti_aliasing
        self.renderer.perspective = True
        self.renderer.viewing_angle = self.xp.degrees(self.xp.arctan(16. / 60.))
        self.renderer.camera_mode = 'look_at'
        self.renderer.blur_size = 0

        with self.init_scope():
            # setup links
            dim_in_encoder = 3
            if no_texture:
                texture_decoder_type = 'dummy'
                dim_in_discriminator = 1
            else:
                dim_in_discriminator = 4

            self.encoder = encoders.get_encoder(encoder_type, dim_in_encoder, self.dim_hidden)
            self.shape_decoder = decoders.get_shape_decoder(shape_decoder_type, self.dim_hidden, vertex_scaling)
            self.texture_decoder = decoders.get_texture_decoder(texture_decoder_type, self.dim_hidden, texture_scaling)
            self.discriminator = discriminators.get_discriminator(discriminator_type, dim_in_discriminator)
            self.shape_encoder = self.encoder
Example #14
    def __init__(self,
                 opt,
                 lexicon,
                 pretrained_wembs=None,
                 pretrained_user=None,
                 lang_model=None):
        """Constructor

        :param input_dim: Embedding dimension
        :param hidden_dim: Dimension of the recurrent layers
        :param att_dim: Dimension of the hidden layer in the attention MLP
        :param lexicon: Lexicon object containing dictionaries/bilingual discrete lexicons...
        :param enc_type: Type of encoder
        :param att_type: Type of attention mechanism
        :param dec_type: Type of decoder
        :param model_file: File where the model should be saved (default: (None))
        :param label_smoothing: interpolation coefficient with second output distribution
        :param dropout: dropout rate for parameters
        :param word_dropout: dropout rate for words in the decoder
        :param max_len: Maximum length allowed when generating translations (default: (60))
        """
        # Store config
        self.nl = opt.num_layers
        self.dr, self.wdr = opt.dropout_rate, opt.word_dropout_rate
        self.ls, self.ls_eps = (opt.label_smoothing > 0), opt.label_smoothing
        self.max_len = opt.max_len
        self.src_sos, self.src_eos = lexicon.w2ids['SOS'], lexicon.w2ids['EOS']
        self.trg_sos, self.trg_eos = lexicon.w2idt['SOS'], lexicon.w2idt['EOS']
        # Dimensions
        self.vs, self.vt = len(lexicon.w2ids), len(lexicon.w2idt)
        self.du = opt.usr_dim
        self.nu = len(lexicon.usr2id)
        self.di, self.dh, self.da = opt.emb_dim, opt.hidden_dim, opt.att_dim
        # Model
        self.pc = dy.ParameterCollection()
        self.model_file = opt.model
        # Encoder
        self.enc = encoders.get_encoder(opt.encoder,
                                        self.nl,
                                        self.di,
                                        self.dh,
                                        self.du,
                                        self.vs,
                                        self.pc,
                                        dr=self.dr,
                                        pre_embs=pretrained_wembs)
        # Attention module
        self.att = attention.get_attention(opt.attention, self.enc.dim,
                                           self.dh, self.da, self.pc)
        # Decoder
        self.dec = decoders.get_decoder(opt.decoder,
                                        self.nl,
                                        self.di,
                                        self.enc.dim,
                                        self.dh,
                                        self.vt,
                                        self.du,
                                        self.pc,
                                        pre_embs=pretrained_wembs,
                                        dr=self.dr,
                                        wdr=self.wdr)
        # User recognizer parameters
        self.usr = user.ZeroVocabUserRecognizer(self.vt, self.nu, self.enc.dim,
                                                self.pc)
        # Target language model (for label smoothing)
        self.lm = lang_model
        self.lex = lexicon
        self.unk_replace = opt.unk_replacement
        self.user_token = opt.user_token
        self.test = True
        self.update = True
Example #15
def decode_anchor(anchor: AnchorCfg):
    enc = get_encoder(anchor.encoder_id)
    if enc is None:
        raise ValueError(f'unknown encoder: {anchor.encoder_id}')
    for var in anchor.variants:
        enc.decode_variant(var)
Example #16
    tokenizer=tokenizer,  # TODO: key for the token_indexer
    token_indexers={'tokens': token_indexers})
train_ds, test_ds = [reader.read(fname) for fname in ['train', 'test']]
val_ds = None

voc = Vocabulary()

iterator = BucketIterator(batch_size=config.batch_size,
                          sorting_keys=[('sentence', 'num_tokens')])
iterator.index_with(vocab=voc)

# 2. Build the model

word_embeddings = get_embedder()

encoder = get_encoder(voc, word_embeddings.get_output_dim())

model = BaseModelWithoutKnowledge(voc=voc,
                                  word_embeddings=word_embeddings,
                                  encoder=encoder,
                                  out_sz=reader.label_length,
                                  multi=False)
model = model.cuda(cuda_device) if cuda_device > -1 else model
# 3. Train
optimizer = optim.Adam(model.parameters(), lr=config.lr)

trainer = Trainer(
    model=model,
    optimizer=optimizer,
    iterator=iterator,
    train_dataset=train_ds,
    validation_dataset=val_ds,
)
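With the trainer configured, training is a single call (a sketch; Trainer.train() returns a metrics dict in AllenNLP):

metrics = trainer.train()
print(metrics)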
Example #17
    def __init__(
        self,
        encoder_type,
        shape_decoder_type,
        texture_decoder_type,
        discriminator_type,
        silhouette_loss_type,
        vertex_scaling,
        texture_scaling,
        silhouette_loss_levels,
        lambda_silhouettes,
        lambda_perceptual,
        lambda_inflation,
        lambda_graph_laplacian,
        lambda_discriminator,
        no_texture,
        class_conditional,
        symmetric,
        dim_hidden=512,
        image_size=224,
        anti_aliasing=False,
    ):
        super(PascalModel, self).__init__()
        self.trainer = None
        self.dataset_name = 'pascal'

        # model size
        self.dim_hidden = dim_hidden

        # loss type
        self.silhouette_loss_type = silhouette_loss_type
        self.silhouette_loss_levels = silhouette_loss_levels

        # loss weights
        self.lambda_silhouettes = lambda_silhouettes
        self.lambda_perceptual = lambda_perceptual
        self.lambda_discriminator = lambda_discriminator
        self.lambda_inflation = lambda_inflation
        self.lambda_graph_laplacian = lambda_graph_laplacian

        # others
        self.no_texture = no_texture
        self.class_conditional = class_conditional
        self.symmetric = symmetric
        self.use_depth = False

        # setup renderer
        self.renderer = neural_renderer.Renderer()
        self.renderer.image_size = image_size
        self.renderer.anti_aliasing = anti_aliasing
        self.renderer.perspective = False
        self.renderer.camera_mode = 'none'

        with self.init_scope():
            # setup links
            dim_in_encoder = 3
            if no_texture:
                texture_decoder_type = 'dummy'
                dim_in_discriminator = 1
            else:
                dim_in_discriminator = 4

            self.encoder = encoders.get_encoder(encoder_type, dim_in_encoder,
                                                self.dim_hidden)
            self.shape_decoder = decoders.get_shape_decoder(
                shape_decoder_type, self.dim_hidden, vertex_scaling, symmetric)
            self.texture_decoder = decoders.get_texture_decoder(
                texture_decoder_type, self.dim_hidden, texture_scaling,
                self.symmetric)
            self.discriminator = discriminators.get_discriminator(
                discriminator_type, dim_in_discriminator)