Example No. 1
    def __init__(self, model_name):
        self.asset_dir = os.path.join(Config.get('models_dir'), model_name)
        os.makedirs(self.asset_dir, exist_ok=True)
        self.asset_url_map = {}

        model_configs = Config.get('models')
        for conf in model_configs:
            if conf.get('name') == model_name:
                asset_urls = conf.get('asset_urls')
                for asset in asset_urls:
                    self.asset_url_map[asset['name']] = asset['url']
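For reference, a sketch of the 'models' entry this constructor expects from Config (the URL is hypothetical; the asset file name is borrowed from ModelConstants in Example No. 8):

models = [{
    'name': 'ssd',
    'asset_urls': [
        # hypothetical URL; real entries live in the project's config
        {'name': 'ssd_300_vgg.ckpt.zip',
         'url': 'https://example.com/ssd_300_vgg.ckpt.zip'},
    ],
}]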
Example No. 2
    def __init__(self, is_base: bool = True):
        super().__init__()
        self.config = Config()
        self.config.add_model(is_base)

        self.mha = MultiHeadAttention(masked_attention=False)
        self.attention_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)
        self.ln = LayerNorm(self.config.model.train_hparams.eps)
        self.ffn = FeedForwardNetwork()
        self.residual_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)
Example No. 3
    def handle(cls):

        if Config.get('model') == 'ssd':
            cls.model = SSDModel()
        else:
            raise ValueError('Unsupported model: {}'.format(Config.get('model')))

        logger.debug('Start serving ...')
        full_video_path = os.path.join(Config.get('videos_dir'),
                                       Config.get('serve').get('video'))

        url = None
        precomputed_labels = None
        full_annotated_path = None
        confs = Config.get('videos')
        for conf in confs:
            if conf.get('name') == Config.get('serve').get('video'):
                url = conf.get('url')
                precomputed_labels = conf.get('precomputed_labels')
                full_annotated_path = os.path.join(Config.get('videos_dir'),
                                                   conf.get('annotated_name'))
                break

        # download video if necessary
        if os.path.exists(full_video_path):
            logger.debug('video already exists, skip downloading')
        else:
            os.makedirs(Config.get('videos_dir'), exist_ok=True)
            os.system('wget {} -O {} --force-directories'.format(
                url, full_video_path))

        logger.debug('Processing video at {}'.format(full_video_path))
        logger.debug(
            'Producing annotated video to {}'.format(full_annotated_path))

        # load precomputed labels if possible
        precomputed_labels_path = os.path.join(Config.get('videos_dir'),
                                               precomputed_labels)
        if os.path.exists(precomputed_labels_path):
            cls.use_precomputed = True
            with open(precomputed_labels_path, 'r') as f:
                for line in f:
                    cls.scores.append(ujson.loads(line))
            logger.debug(
                'precomputed labels file exists, skip real time prediction')

        score_fn = cls.process_precomputed if cls.use_precomputed else cls.process
        fps = 50 if cls.use_precomputed else 1000

        video_processor = VideoProcessor(full_video_path, score_fn,
                                         full_annotated_path)
        video_processor.start(
            max_frame_num=Config.get('serve').get('max_frame_num'), fps=fps)

        if not cls.use_precomputed and cls.scores:
            with open(precomputed_labels_path, 'w+') as f:
                for score in cls.scores:
                    f.write(str(score) + '\n')
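The precomputed-labels file used above is one JSON document per line: written with str(score), read back with ujson.loads per line. A minimal round-trip sketch with hypothetical scores (str() of a plain list of numbers happens to be valid JSON; richer structures would need json.dumps):

import ujson

scores = [[0.91, 0.05], [0.12, 0.80]]  # hypothetical per-frame scores
with open('labels.jsonl', 'w') as f:
    for score in scores:
        f.write(str(score) + '\n')
with open('labels.jsonl') as f:
    assert [ujson.loads(line) for line in f] == scores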
Example No. 4
    def __init__(self, masked_attention: bool = False, is_base: bool = True):
        super().__init__()
        self.attention = Attention(masked_attention)
        config = Config()
        config.add_model(is_base)
        self.batch_size = config.model.train_hparams.batch_size
        self.dim_model: int = config.model.model_params.dim_model
        self.dim_v: int = config.model.model_params.dim_v
        self.num_heads = config.model.model_params.num_heads
        assert (self.dim_model // self.num_heads) == self.dim_v
        assert (
            self.dim_model %
            self.num_heads == 0), "embed_dim must be divisible by num_heads"
        self.linear = nn.Linear(self.num_heads * self.dim_v, self.dim_model)
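A quick numeric illustration of the two assertions above, assuming the transformer-base values dim_model=512, num_heads=8, dim_v=64 (assumptions, since the actual values come from Config):

dim_model, num_heads, dim_v = 512, 8, 64   # assumed transformer-base defaults
assert dim_model // num_heads == dim_v     # each head operates in a dim_v-sized subspace
assert dim_model % num_heads == 0          # embed_dim must be divisible by num_heads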
Example No. 5
class TrainHandler(object):

    data_sets = Config.get('train').get('data_sets', [])
    holdout_percentage = Config.get('holdout_percentage', 0.1)

    @classmethod
    def handle(cls):
        cls._download()
        train_set, val_set = cls._process()
        cls._train(train_set, val_set)

    @classmethod
    def _download(cls):
        logger.debug('Fetching data sets: {}'.format(cls.data_sets))
        for name in cls.data_sets:
            RawProcessor.download(name)

    @classmethod
    def _process(cls):
        '''
        Load raw data and labels, shuffle, and split them into training and validation sets.
        :return: (train_set, val_set), each a list of (key, data, label) tuples
        '''
        raw_data_map = RawProcessor.load_raw_data(cls.data_sets)
        raw_label_map = RawProcessor.load_raw_labels(cls.data_sets)

        seed(0)
        shuffled_keys = [k for k in raw_data_map]
        shuffle(shuffled_keys)

        split_index = int(
            round(len(shuffled_keys) * (1 - cls.holdout_percentage)))
        train_keys = shuffled_keys[:split_index]
        val_keys = shuffled_keys[split_index:]

        train_set = [(k, raw_data_map[k], raw_label_map[k])
                     for k in train_keys]
        val_set = [(k, raw_data_map[k], raw_label_map[k]) for k in val_keys]

        return train_set, val_set

    @classmethod
    def _train(cls, train_set, val_set):

        if Config.get('model') == 'ssd':
            model = SSDModel()
        else:
            raise ValueError('Unsupported model: {}'.format(Config.get('model')))

        model.train(train_set, val_set)
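A quick check of the holdout arithmetic in _process above: with the default holdout_percentage of 0.1 and 100 keys, split_index lands at 90, giving a 90/10 train/validation split (the sample keys are hypothetical):

from random import seed, shuffle

holdout_percentage = 0.1
keys = ['sample_{}'.format(i) for i in range(100)]  # hypothetical keys
seed(0)
shuffle(keys)
split_index = int(round(len(keys) * (1 - holdout_percentage)))
train_keys, val_keys = keys[:split_index], keys[split_index:]
assert len(train_keys) == 90 and len(val_keys) == 10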
Example No. 6
class DecoderLayer(nn.Module):
    """Decoder layer block"""
    def __init__(self, is_base: bool = True):
        super().__init__()
        self.config = Config()
        self.config.add_model(is_base)

        self.masked_mha = MultiHeadAttention(masked_attention=True)
        self.mha = MultiHeadAttention(masked_attention=False)
        self.ln = LayerNorm(self.config.model.train_hparams.eps)
        self.ffn = FeedForwardNetwork()
        self.residual_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)

    def attention_mask(self, batch_size: int, seq_len: int) -> Tensor:
        attention_shape = (batch_size, seq_len, seq_len)
        attention_mask = np.triu(np.ones(attention_shape), k=1).astype("uint8")
        attention_mask = torch.from_numpy(attention_mask) == 0
        return attention_mask  # (batch_size, seq_len, seq_len)

    def forward(
        self,
        target_emb: Tensor,
        target_mask: Tensor,
        encoder_out: Tensor,
        encoder_mask: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
            target_emb: input to the decoder layer (batch_size, seq_len, dim_model)
            target_mask: padding mask of the target embedding
            encoder_out: the last encoder layer's output (batch_size, seq_len, dim_model)
            encoder_mask: boolean Tensor where padding elements are indicated by False (batch_size, seq_len)
        """
        attention_mask = self.attention_mask(target_emb.size(0),
                                             target_emb.size(1))
        target_emb = target_emb + self.masked_mha(
            query=target_emb,
            key=target_emb,
            value=target_emb,
            attention_mask=attention_mask,
        )
        target_emb = self.ln(target_emb)
        target_emb = target_emb + self.mha(
            query=target_emb, key=encoder_out, value=encoder_out)
        target_emb = self.ln(target_emb)
        target_emb = target_emb + self.ffn(target_emb)
        return target_emb, target_mask
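A minimal check of the causal mask produced by attention_mask above, for batch_size=1 and seq_len=4: each position may attend only to itself and earlier positions.

import numpy as np
import torch

mask = torch.from_numpy(
    np.triu(np.ones((1, 4, 4)), k=1).astype("uint8")) == 0
print(mask[0].int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)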
Example No. 7
    def _train(cls, train_set, val_set):

        if Config.get('model') == 'ssd':
            model = SSDModel()
        else:
            raise ValueError('Unsupported model: {}'.format(Config.get('model')))

        model.train(train_set, val_set)
Example No. 8
class ModelConstants(object):
    MODEL_NAME = 'ssd'
    CHECKPOINT_PRETRAINED_FILE = 'ssd_300_vgg.ckpt.zip'
    CHECKPOINT_PRETRAINED = 'ssd_300_vgg.ckpt'
    CHECKPOINT_TRAINED = 'ssd_trained.ckpt'

    FULL_ASSET_PATH = os.path.join(Config.get('models_dir'), MODEL_NAME)
Example No. 9
    def __init__(self, pipe_params: Optional[Dict[str, Any]] = None, model_name: str = 'baseline') -> None:
        pipe_params = pipe_params or {}  # avoid a shared mutable default argument
        super().__init__(pipe_params)
        self.pipeline.steps.append(('clf', FlatPredictor()))
        self.pipeline.set_params(**pipe_params)

        self.save_path = Config()['Baseline']['model_path']
        self.model_name = model_name
Example No. 10
    def __init__(self, pipe_params: Optional[Dict[str, Any]] = None, model_name: str = 'default_gbm') -> None:
        pipe_params = pipe_params or {}  # avoid a shared mutable default argument
        super().__init__(pipe_params)
        self.pipeline.steps.append(('clf', LGBMClassifier()))
        self.pipeline.set_params(**pipe_params)

        self.save_path = Config()['LGBM']['model_path']
        self.model_name = model_name
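Since the classifier is appended as the 'clf' step, pipe_params follows scikit-learn's '<step>__<parameter>' naming convention. A hypothetical instantiation (the enclosing class name is not shown in this snippet, so 'GBMModel' is a stand-in):

model = GBMModel(pipe_params={'clf__n_estimators': 200,
                              'clf__learning_rate': 0.05})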
Example No. 11
    def _test(cls, test_set):

        if Config.get('model') == 'ssd':
            model = SSDModel()
        else:
            raise ValueError('Unsupported model: {}'.format(Config.get('model')))

        output_path = Config.get('test').get('output_path')
        slide_show = Config.get('test').get('slide_show')
        json_lines = []

        results = model.test(test_set, show=slide_show)

        for instance, result in zip(test_set, results):
            json_lines.append(cls._serialize(instance[0], result))

        with open(output_path, 'w') as f:
            f.writelines(json_lines)
Example No. 12
    def _signandpost(self, param, name):
        u"""签名并发送"""
        sig = Encrypt().sign(param)
        param['sign'] = sig
        params_json = json.dumps(param)
        # print params_json
        log(name, params_json, 'info')

        url = ReadXML(Config().get('data', 'url_xml')).get_url(name)
        return self._header().post(url, params_json).content
Example No. 13
    def __init__(self,
                 max_len: int,
                 embedding_dim: int,
                 is_base: bool = True) -> None:
        super().__init__()
        config = Config()
        config.add_model(is_base)

        self.dropout = nn.Dropout(p=config.model.model_params.dropout)
        positional_encoding = torch.zeros(max_len, embedding_dim)
        position = torch.arange(0, max_len,
                                dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        div_term = torch.exp(
            torch.arange(0, embedding_dim, 2).float() / embedding_dim *
            math.log(1e4))
        positional_encoding[:, 0::2] = torch.sin(position / div_term)
        positional_encoding[:, 1::2] = torch.cos(position / div_term)
        positional_encoding = positional_encoding.unsqueeze(0).transpose(
            0, 1)  # (max_len, 1, embedding_dim)
        self.register_buffer("positional_encoding",
                             positional_encoding)  # TODO: register_buffer?
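Note that div_term above equals 10000^(2i/d), so sin(position / div_term) matches the canonical sin(pos / 10000^(2i/d)) from the Transformer paper. A quick equivalence check (a sketch, not part of the module):

import math
import torch

embedding_dim = 8
i2 = torch.arange(0, embedding_dim, 2).float()
div_term = torch.exp(i2 / embedding_dim * math.log(1e4))      # 10000^(2i/d)
canonical = torch.exp(i2 * (-math.log(1e4) / embedding_dim))  # 10000^(-2i/d)
assert torch.allclose(1.0 / div_term, canonical)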
Example No. 14
    def setUp(self):
        self.current_path = os.path.abspath(os.path.dirname(__file__))
        fullpath = os.path.join(self.current_path, "test_config")

        data = []
        data.append("[Ignore]")
        data.append("list = user1,user2,user3,user4,user5,user6")
        data.append("list3 = ")
        data.append("list4 = user10")
        data.append("")
        data.append("[Channels]")
        data.append("general = 000000000000000000001")
        data.append("test = 000000000000000000002")
        data.append("awesome = 000000000000000000003")
        data.append("asdf = 000000000000000000004")
        data.append("cool = 000000000000000000005")
        data.append("voice = 000000000000000000006")

        with open(fullpath, 'w') as f:
            f.write("\n".join(data))

        self.channel_config = Config(fullpath, "Channels")
        self.ignore_config = Config(fullpath, "Ignore")
Example No. 15
class TestHandler(object):
    data_sets = Config.get('test').get('data_sets', [])

    @classmethod
    def handle(cls):
        cls._download()
        test_set = cls._process()
        cls._test(test_set)

    @classmethod
    def _download(cls):
        logger.debug('Fetching data sets: {}'.format(cls.data_sets))
        for name in cls.data_sets:
            RawProcessor.download(name)

    @classmethod
    def _process(cls):
        '''
        Load raw data as a list of (key, data, None) tuples.
        :return: the test set as a list of tuples
        '''
        raw_data_map = RawProcessor.load_raw_data(cls.data_sets)
        return [(k, raw_data_map[k], None) for k in raw_data_map]

    @classmethod
    def _test(cls, test_set):

        if Config.get('model') == 'ssd':
            model = SSDModel()
        else:
            raise ValueError('Unsupported model: {}'.format(Config.get('model')))

        output_path = Config.get('test').get('output_path')
        slide_show = Config.get('test').get('slide_show')
        json_lines = []

        results = model.test(test_set, show=slide_show)

        for instance, result in zip(test_set, results):
            json_lines.append(cls._serialize(instance[0], result))

        with open(output_path, 'w') as f:
            f.writelines(json_lines)

    @classmethod
    def _serialize(cls, key, result):
        """
        Neither json nor ujson serializes these results as needed, so we roll our own.
        :return: a JSON line mapping key to result
        """
        return '{{"{}": {}}}\n'.format(key, str(result))
Example No. 16
class EncoderLayer(nn.Module):
    """Encoder layer block"""
    def __init__(self, is_base: bool = True):
        super().__init__()
        self.config = Config()
        self.config.add_model(is_base)

        self.mha = MultiHeadAttention(masked_attention=False)
        self.attention_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)
        self.ln = LayerNorm(self.config.model.train_hparams.eps)
        self.ffn = FeedForwardNetwork()
        self.residual_dropout = nn.Dropout(
            p=self.config.model.model_params.dropout)

    def forward(self, source_emb: Tensor,
                source_mask: Tensor) -> Tuple[Tensor, Tensor]:
        source_emb = source_emb + self.mha(
            query=source_emb, key=source_emb, value=source_emb)
        source_emb = self.attention_dropout(source_emb)
        source_emb = self.ln(source_emb)
        source_emb = source_emb + self.residual_dropout(self.ffn(source_emb))
        source_emb = self.ln(source_emb)
        return source_emb, source_mask
Example No. 17
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        configs = Config()
        configs.add_tokenizer(langpair)
        configs.add_model(is_base)
        dim_model: int = configs.model.model_params.dim_model
        vocab_size = configs.tokenizer.vocab_size

        self.encoder = Encoder(langpair)
        self.decoder = Decoder(langpair)
        self.linear = nn.Linear(dim_model, vocab_size)
Example No. 18
class Processor(object):

    dataset_conf = Config.get('datasets')

    @classmethod
    def download(cls, name):
        for conf in cls.dataset_conf:
            if conf.get('name') == name:
                cls._download(name, conf.get('url'))
                return

        raise Exception('Data set {} not found in base.yaml'.format(name))

    @classmethod
    def get_raw_dataset_dir(cls, name):
        return '{}/raw/{}'.format(Config.get('data_raw_dir'), name)

    @classmethod
    def _download(cls, name, url):
        dataset_dir = cls.get_raw_dataset_dir(name)
        os.makedirs(dataset_dir, exist_ok=True)
        os.system('wget {} -P {}'.format(url, dataset_dir))
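Usage is one call per dataset name; the name here is hypothetical and must match a 'name' entry under 'datasets' in base.yaml:

Processor.download('voc2007')  # fetches into '<data_raw_dir>/raw/voc2007' via makedirs + wget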
Example No. 19
    def __init__(self, langpair: str, is_base: bool = True) -> None:
        super().__init__()
        # TODO: support transformer-base and transformer-big
        configs = Config()
        configs.add_model(is_base)
        configs.add_tokenizer(langpair)
        tokenizer = load_tokenizer(langpair)
        padding_idx = tokenizer.token_to_id("<pad>")

        self.dim_model: int = configs.model.model_params.dim_model
        self.vocab_size = configs.tokenizer.vocab_size
        self.embedding_matrix = nn.Embedding(self.vocab_size,
                                             self.dim_model,
                                             padding_idx=padding_idx)
        self.scale = self.dim_model**0.5
        self.max_len = configs.model.model_params.max_len
        self.positional_encoding = PositionalEncoding(self.max_len,
                                                      self.dim_model)
Example No. 20
class TestConfig(unittest.TestCase):
    def setUp(self):
        self.current_path = os.path.abspath(os.path.dirname(__file__))
        fullpath = os.path.join(self.current_path, "test_config")

        data = []
        data.append("[Ignore]")
        data.append("list = user1,user2,user3,user4,user5,user6")
        data.append("list3 = ")
        data.append("list4 = user10")
        data.append("")
        data.append("[Channels]")
        data.append("general = 000000000000000000001")
        data.append("test = 000000000000000000002")
        data.append("awesome = 000000000000000000003")
        data.append("asdf = 000000000000000000004")
        data.append("cool = 000000000000000000005")
        data.append("voice = 000000000000000000006")

        with open(fullpath, 'w') as f:
            f.write("\n".join(data))

        self.channel_config = Config(fullpath, "Channels")
        self.ignore_config = Config(fullpath, "Ignore")

    def tearDown(self):
        new_file = os.path.join(self.current_path, "new_file")
        if os.path.isfile(new_file):
            os.remove(new_file)

    def test_create_file(self):
        new_file = os.path.join(self.current_path, "new_file")
        self.assertFalse(os.path.isfile(new_file))
        Config(new_file, "MySection")
        self.assertTrue(os.path.isfile(new_file))
        os.remove(new_file)

    def test_get(self):
        channel_id = self.channel_config.get("test")
        self.assertEqual(channel_id, "000000000000000000002")

    def test_get_as_list(self):
        users = self.ignore_config.get_as_list("list")
        user_list = ["user1", "user2", "user3", "user4", "user5", "user6"]
        self.assertListEqual(users, user_list)

    def test_get_all(self):
        channels = self.channel_config.get_all()
        channel_list = ["general", "test", "awesome", "asdf", "cool", "voice"]
        self.assertListEqual(channels, channel_list)

    def test_save(self):
        self.channel_config.save("newchannel", "111111111111111111")
        channel_id = self.channel_config.get("newchannel")
        self.assertEqual(channel_id, "111111111111111111")

    def test_append(self):
        self.ignore_config.append("list", "user9001")
        users = self.ignore_config.get("list")
        self.assertIn(",user9001", users)
        # Try adding again for code coverage
        self.ignore_config.append("list", "user9001")
        users = self.ignore_config.get("list")
        self.assertIn(",user9001", users)
        # Try adding it to a new option to assert option is created
        self.ignore_config.append("list2", "user9001")
        users = self.ignore_config.get("list2")
        self.assertIn("user9001", users)
        # Try adding it to an empty option
        self.ignore_config.append("list3", "user9001")
        users = self.ignore_config.get("list3")
        self.assertIn("user9001", users)

    def test_truncate(self):
        # Try an option that doesn't exist
        self.ignore_config.truncate("list2", "user3")
        does_have = self.ignore_config.has("list2")
        self.assertFalse(does_have)
        for x in range(0, 6):
            user = "******".format(x)
            self.ignore_config.truncate("list", user)
            users = self.ignore_config.get("list")
            self.assertNotIn(user, users, "Running " + user)

        # Test deleting the last one deletes the entire option
        self.ignore_config.truncate("list", "user6")
        with self.assertRaises(configparser.NoOptionError):
            # get raises an exception if it can't find the option. This is what we are testing.
            self.ignore_config.get("list")

    def test_delete(self):
        self.channel_config.delete("asdf")
        channels = self.channel_config.get_all()
        self.assertNotIn("asdf", channels)

    def test_has(self):
        does_contain = self.channel_config.has("awesome")
        self.assertTrue(does_contain)

    def test_contains(self):
        # Test list contains
        does_contain = self.ignore_config.contains("list", "user5")
        self.assertTrue(does_contain)
        # Test missing option doesn't contain
        does_contain = self.ignore_config.contains("list2", "user5")
        self.assertFalse(does_contain)
        # Test empty option doesn't contain
        does_contain = self.ignore_config.contains("list3", "user5")
        self.assertFalse(does_contain)
        # Test non-list contains
        does_contain = self.ignore_config.contains("list4", "user10")
        self.assertTrue(does_contain)
        # Test non-list doesn't contain
        does_contain = self.ignore_config.contains("list4", "user5")
        self.assertFalse(does_contain)
Example No. 21
    def __init__(self,
                 n_words,
                 n_feats,
                 n_labels,
                 feat='char',
                 n_embed=100,
                 n_feat_embed=100,
                 n_char_embed=50,
                 bert=None,
                 n_bert_layers=4,
                 mix_dropout=.0,
                 embed_dropout=.33,
                 n_lstm_hidden=400,
                 n_lstm_layers=3,
                 lstm_dropout=.33,
                 n_mlp_span=500,
                 n_mlp_label=100,
                 mlp_dropout=.33,
                 feat_pad_index=0,
                 pad_index=0,
                 unk_index=1,
                 **kwargs):
        super().__init__()

        self.args = Config().update(locals())
        # the embedding layer
        self.word_embed = nn.Embedding(num_embeddings=n_words,
                                       embedding_dim=n_embed)
        if feat == 'char':
            self.feat_embed = CharLSTM(n_chars=n_feats,
                                       n_embed=n_char_embed,
                                       n_out=n_feat_embed,
                                       pad_index=feat_pad_index)
        elif feat == 'bert':
            bert_requires_grad = str(kwargs.get('bert_requires_grad', 'False')) == 'True'
            if bert_requires_grad:
                self.feat_embed = BertEmbeddingfinetuning(model=bert,
                                                          n_layers=n_bert_layers,
                                                          n_out=n_feat_embed,
                                                          pad_index=feat_pad_index,
                                                          dropout=mix_dropout)
            else:
                self.feat_embed = BertEmbedding(model=bert,
                                                n_layers=n_bert_layers,
                                                n_out=n_feat_embed,
                                                pad_index=feat_pad_index,
                                                dropout=mix_dropout)
            self.n_feat_embed = self.feat_embed.n_out
        elif feat == 'tag':
            self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                           embedding_dim=n_feat_embed)
        else:
            raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
        self.embed_dropout = IndependentDropout(p=embed_dropout)

        # the lstm layer
        self.lstm = BiLSTM(input_size=n_embed + n_feat_embed,
                           hidden_size=n_lstm_hidden,
                           num_layers=n_lstm_layers,
                           dropout=lstm_dropout)
        self.lstm_dropout = SharedDropout(p=lstm_dropout)
        self.decoder_layers = n_lstm_layers
        # the MLP layers
        # self.mlp_span_l = MLP(n_in=n_lstm_hidden*2,
        #                       n_out=n_mlp_span,
        #                       dropout=mlp_dropout)
        # self.mlp_span_r = MLP(n_in=n_lstm_hidden*2,
        #                       n_out=n_mlp_span,
        #                       dropout=mlp_dropout)
        self.mlp_span_splitting = MLP(n_in=n_lstm_hidden * 2,
                                      n_out=n_mlp_span,
                                      dropout=mlp_dropout)
        self.mlp_label_l = MLP(n_in=n_lstm_hidden * 2,
                               n_out=n_mlp_label,
                               dropout=mlp_dropout)
        self.mlp_label_r = MLP(n_in=n_lstm_hidden * 2,
                               n_out=n_mlp_label,
                               dropout=mlp_dropout)

        # the Biaffine layers
        # self.span_attn = Biaffine(n_in=n_mlp_span,
        #                           bias_x=True,
        #                           bias_y=False)
        self.label_attn = Biaffine(n_in=n_mlp_label,
                                   n_out=n_labels,
                                   bias_x=True,
                                   bias_y=True)
        # self.crf = CRFConstituency()
        # self.criterion = nn.CrossEntropyLoss()
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.hx_dense = nn.Linear(2 * n_lstm_hidden, 2 * n_lstm_hidden)
Example No. 22
    def __init__(self,
                 n_words,
                 n_feats,
                 n_labels,
                 feat='char',
                 n_embed=100,
                 n_feat_embed=100,
                 n_char_embed=50,
                 bert=None,
                 n_bert_layers=4,
                 mix_dropout=.0,
                 embed_dropout=.33,
                 n_lstm_hidden=400,
                 n_lstm_layers=3,
                 lstm_dropout=.33,
                 n_mlp_span=500,
                 n_mlp_label=100,
                 mlp_dropout=.33,
                 feat_pad_index=0,
                 pad_index=0,
                 unk_index=1,
                 **kwargs):
        super().__init__()

        self.args = Config().update(locals())
        # the embedding layer
        self.word_embed = nn.Embedding(num_embeddings=n_words,
                                       embedding_dim=n_embed)
        if feat == 'char':
            self.feat_embed = CharLSTM(n_chars=n_feats,
                                       n_embed=n_char_embed,
                                       n_out=n_feat_embed,
                                       pad_index=feat_pad_index)
        elif feat == 'bert':
            self.feat_embed = BertEmbedding(model=bert,
                                            n_layers=n_bert_layers,
                                            n_out=n_feat_embed,
                                            pad_index=feat_pad_index,
                                            dropout=mix_dropout)
            self.n_feat_embed = self.feat_embed.n_out
        elif feat == 'tag':
            self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                           embedding_dim=n_feat_embed)
        else:
            raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
        self.embed_dropout = IndependentDropout(p=embed_dropout)

        # the lstm layer
        self.token_lstm = BiLSTM(input_size=n_embed + n_feat_embed,
                                 hidden_size=n_lstm_hidden,
                                 num_layers=n_lstm_layers,
                                 dropout=lstm_dropout)
        self.token_lstm_dropout = SharedDropout(p=lstm_dropout)

        self.edu_lstm = BiLSTM(input_size=n_lstm_hidden * 2,
                               hidden_size=n_lstm_hidden,
                               num_layers=n_lstm_layers,
                               dropout=lstm_dropout)
        self.edu_lstm_dropout = SharedDropout(p=lstm_dropout)
        self.decoder_layers = n_lstm_layers

        self.mlp_span_splitting = MLP(n_in=n_lstm_hidden * 2,
                                      n_out=n_mlp_span,
                                      dropout=mlp_dropout)

        self.pad_index = pad_index
        self.unk_index = unk_index
        self.hx_dense = nn.Linear(2 * n_lstm_hidden, 2 * n_lstm_hidden)
Example No. 23
from comet_ml import Experiment

import torch
from torchvision import transforms

from src.dataset import Places2
from src.model import PConvUNet
from src.loss import InpaintingLoss, VGG16FeatureExtractor
from src.train import Trainer
from src.utils import Config, load_ckpt, create_ckpt_dir


# set the config
config = Config("config.yml")
config.ckpt = create_ckpt_dir()
print("Check Point is '{}'".format(config.ckpt))

# Define the used device
device = torch.device("cuda:{}".format(config.cuda_id)
                      if torch.cuda.is_available() else "cpu")

# Define the model
print("Loading the Model...")
model = PConvUNet(finetune=config.finetune,
                  layer_size=config.layer_size)
if config.finetune:
    model.load_state_dict(torch.load(config.finetune)['model'])
model.to(device)


# Data Transformation
Example No. 24
    train_dataloader, val_dataloader = get_dataloader(cfg)

    # Create model
    Model = getattr(__import__("src"), cfg.model.name)
    runner = Model(cfg.model.params)

    # Set trainer (PyTorch Lightning)
    os.makedirs(cfg.model.ckpt.path, exist_ok=True)
    trainer = pl.Trainer(
        logger=wandb_logger,
        gpus=-1 if torch.cuda.is_available() else 0,
        max_epochs=cfg.model.params.max_epochs,
        deterministic=True,
        checkpoint_callback=ModelCheckpoint(cfg.model.ckpt.path),
    )

    # Train
    trainer.fit(
        runner, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
    )


if __name__ == "__main__":
    args = parse_args()

    cfg = Config()
    cfg.add_dataset(args.cfg_dataset)
    cfg.add_model(args.cfg_model)

    run(cfg, args.wandb)
Example No. 25
    def load(cls, path, **kwargs):
        r"""
        Load data fields and model parameters from a pretrained parser.

        Args:
            path (str):
                - a string with the shortcut name of a pre-trained parser defined in supar.PRETRAINED
                  to load from cache or download, e.g., `crf-dep-en`.
                - a path to a directory containing a pre-trained parser, e.g., `./<path>/model`.
            kwargs (dict):
                A dict holding the unconsumed arguments.

        Returns:
            The loaded parser.
        """

        args = Config(**locals())
        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        if os.path.exists(path):
            state = torch.load(path, map_location=args.device)
        else:
            # loading by shortcut name (downloading from supar.PRETRAINED) is not shown here
            raise FileNotFoundError('no pretrained parser found at {}'.format(path))

        args = state['args'].update(args)
        args.device = 'cpu'

        model = cls.MODEL(**args)

        transform = state['transform']

        parser = cls(args, model, transform)
        if state['pretrained']:
            model.load_pretrained(state['pretrained']).to(args.device)
        else:
            model.load_pretrained(parser.WORD.embed).to(args.device)

        model.load_state_dict(state['state_dict'])
        model.eval()
        model.to(args.device)

        parser.model = model
        parser.args = args
        parser.transform = transform

        if parser.args.feat in ('char', 'bert'):
            parser.WORD, parser.FEAT = parser.transform.WORD
        else:
            parser.WORD, parser.FEAT = parser.transform.WORD, parser.transform.POS
        parser.EDU_BREAK = parser.transform.EDU_BREAK
        parser.GOLD_METRIC = parser.transform.GOLD_METRIC
        # self.TREE = self.transform.TREE
        try:
            parser.CHART = parser.transform.CHART
            parser.PARSINGORDER = parser.transform.PARSINGORDER
        except AttributeError:
            print(
                'parser.CHART and parser.PARSINGORDER parameters are not available for this model.'
            )

        return parser
Example No. 26
# -*- coding: utf-8 -*-
import json
import unittest

from test.API_test.common.BaseCaseOperate import BaseCaseOperate
from test.API_test.common.Merchant import Merchant

from src.utils import Config
from src.utils import ReadXML

url_xml = Config().get('data', 'url_xml')


def generator(datafile='shopping.xlsx',
              sheet_name='sheet0',
              userid=None,
              prepare=False):
    u"""生成测试用例方法,传入data文件与sheet名,得到class"""
    class TestStuff(unittest.TestCase):
        #  Base class: test cases for APIs of the same type can be organized with it
        def setUp(self):
            u"""在setup中新生成一个user与merchant"""
            self.url = ReadXML(url_xml).get_url(sheet_name)
            print u'API URL: {0}'.format(self.url)
            if userid is None:
                self.merchant = Merchant()
                self.merchant.add()
                self.userid = self.merchant.getuserid()
            else:
                self.userid = userid
Example No. 27
    def __init__(self):
        parser = optparse.OptionParser(
            description=DESCRIPTION,
            usage='Usage: %prog [options] [host [port]]',
            version='argus ' + __version__)
        parser.add_option(
            '-p',
            '--password',
            help=
            'provide local password on command-line rather than being prompted'
        )
        parser.add_option('-d',
                          '--dashboard',
                          help='name of dashboard to auto-execute into')
        parser.add_option(
            '-j',
            '--jenkins_report',
            help=
            'TODO:#106 execute and print a jenkins report, exiting upon completion',
            default=False,
            action='store_true')
        parser.add_option(
            '-n',
            '--jenkins_project_name',
            help='Name of consistent root of project names in Jenkins')
        parser.add_option(
            '-b',
            '--jenkins_branch',
            help='TODO:#107 Used with -j, specify branch to run reports against'
        )
        parser.add_option(
            '-t',
            '--jenkins_type',
            help=
            'TODO:#108 Used with -j, specify type of test [u]nit test, or [d]test to report against'
        )
        # parser.add_option('-c', '--triage_csv', help='Specifies local file containing [link, key, summary, assignee, reviewer, status, prio, repro, scope, component] triage file to update against live JIRA data')
        # parser.add_option('-o', '--triage_out', help='Output file name for updated triage data. If not provided, prints to stdout.')
        parser.add_option(
            '-u',
            '--unit_test',
            help=
            'Unit testing mode, does not connect servers, saves config changes to test/ folder',
            action='store_true',
            dest='unit_test')
        parser.add_option(
            '-v',
            '--verbose',
            help='Log verbose debug output to console and argus.log',
            action='store_true',
            dest='verbose')

        optvalues = optparse.Values()
        (options, arguments) = parser.parse_args(sys.argv[1:],
                                                 values=optvalues)

        signal.signal(signal.SIGINT, self.signal_handler)

        if hasattr(options, 'verbose'):
            utils.debug = True
            utils.argus_log = open('argus.log', 'w')

        Config.init_argus()

        # determine if this is a first run, prompt differently pending that
        msg = None
        if hasattr(options, 'password'):
            Config.MenuPass = options.password
        else:
            msg = 'Enter Argus Password (local JIRA credentials will be encrypted with this):'

        while Config.MenuPass == '':
            Config.MenuPass = getpass(msg)

        if hasattr(options, 'unit_test'):
            if os.path.exists('test'):
                shutil.rmtree('test')
            os.mkdir('test')
            os.mkdir(os.path.join('test', 'conf'))
            os.mkdir(os.path.join('test', 'data'))
            utils.unit_test = True

        try:
            self._team_manager = TeamManager.from_file()
        except ConfigError as ce:
            print('ConfigError: {}. Initializing empty JiraManager'.format(ce))
            self._team_manager = TeamManager()
        self._jira_manager = JiraManager(self._team_manager)
        self._jenkins_manager = JenkinsManager(self)

        if hasattr(options, 'triage_csv'):
            jira_connections = {}
            for jira_connection in self._jira_manager.jira_connections():
                jira_connections[
                    jira_connection.connection_name] = jira_connection
            tu = TriageUpdate(
                jira_connections,
                self._jira_manager.get_all_cached_jira_projects())
            triage_out = options.triage_out if hasattr(options,
                                                       'triage_out') else None
            tu.process(options.triage_csv, triage_out)

        if hasattr(options, 'dashboard'):
            user_key = optvalues.__dict__['dashboard']
            dash_keys = list(self._jira_manager.jira_dashboards.keys())

            if user_key in dash_keys:
                self._jira_manager.jira_dashboards[user_key].display_dashboard(
                    self._jira_manager.jira_views)
            else:
                print('Oops... Error with dashboard name {}'.format(user_key))
                print('Possible dashboard names : {}'.format(
                    ','.join(dash_keys)))
                print('Starting Argus normally...')

        self._display_filter = DisplayFilter()

        self.main_menu = [
            MenuOption('d',
                       'Dashboards',
                       self.go_to_dashboards_menu,
                       pause=False),
            MenuOption('v',
                       'Jira Views',
                       self.go_to_jira_views_menu,
                       pause=False),
            MenuOption('p',
                       'JiraProject Queries',
                       self.go_to_projects_menu,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('t',
                       'Run a Team-Based Report',
                       self._run_team_report,
                       pause=False),
            MenuOption('e',
                       'View Escalations',
                       self._jira_manager.display_escalations,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('r',
                       'Generate a Pre-Determined Report',
                       self.go_to_reports_menu,
                       pause=False),
            MenuOption('m',
                       'Team Management',
                       self.go_to_teams_menu,
                       pause=False),
            MenuOption('c',
                       'Jira Connections',
                       self.go_to_jira_connections_menu,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('j',
                       'Jenkins Menu',
                       self.go_to_jenkins_menu,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('o',
                       'Change Options',
                       self.go_to_options_menu,
                       pause=False),
            MenuOption('x', 'Debug', self._jira_manager.run_debug,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('h', 'Help', self._display_readme, pause=False),
            MenuOption.quit_program()
        ]

        self.dashboards_menu = [
            MenuOption('l', 'List all available dashboards',
                       self._jira_manager.list_dashboards),
            MenuOption('d', 'Display a dashboard\'s results',
                       self._jira_manager.display_dashboard),
            MenuOption('c', 'Create a dashboard',
                       self._jira_manager.add_dashboard),
            MenuOption('e', 'Edit a dashboard',
                       self._jira_manager.edit_dashboard),
            MenuOption('r', 'Remove a dashboard',
                       self._jira_manager.remove_dashboard),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.jira_views_menu = [
            MenuOption('l', 'List all defined JiraViews',
                       self._jira_manager.list_all_jira_views),
            MenuOption('d', 'Display a JiraView\'s results',
                       self._jira_manager.display_view),
            MenuOption('a', 'Add a JiraView', self._add_view),
            MenuOption('e', 'Edit a JiraView', self._edit_view),
            MenuOption('r', 'Remove a JiraView',
                       self._jira_manager.remove_view),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.reports_menu = [
            MenuOption(
                'f',
                'FixVersion report (release). Query all tickets with a specified FixVersion',
                self._jira_manager.report_fix_version),
            MenuOption('s',
                       'Add a single-user multi-JIRA open ticket dashboard',
                       self._add_multi_jira_dashboard),
            MenuOption('l', 'Add a label-based cross-cutting view',
                       self._jira_manager.add_label_view),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.team_menu = [
            MenuOption('l', 'List all defined Teams',
                       self._team_manager.list_teams),
            MenuOption('a', 'Add a new team', self._add_team),
            MenuOption('e', 'Edit an existing team', self._edit_team),
            MenuOption('r', 'Remove a team', self._remove_team),
            MenuOption(
                'x',
                'Link a team member to two accounts across JiraConnections',
                self.add_linked_member),
            MenuOption('d', 'Delete a cross-Jira link',
                       self._team_manager.remove_linked_member),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.jira_connections_menu = [
            MenuOption('a', 'Add a JIRA connection',
                       self._jira_manager.add_connection),
            MenuOption('r', 'Remove a JIRA connection and all related views',
                       self._jira_manager.remove_connection),
            MenuOption(
                'c',
                'Cache offline ticket data for a JiraProject on a connection',
                self._jira_manager.cache_new_jira_project_data),
            MenuOption(
                'd',
                'Delete offline cached ticket data for a JiraProject on a connection',
                self._jira_manager.delete_cached_jira_project),
            MenuOption('l', 'List all configured JiraConnections',
                       self._jira_manager.list_jira_connections),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.jenkins_menu = [
            MenuOption('r',
                       'Reports Manager',
                       self.go_to_jenkins_reports_manager_menu,
                       pause=False),
            MenuOption('c',
                       'Connections Manager',
                       self.go_to_jenkins_connections_manager_menu,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.jenkins_reports_manager_menu = [
            MenuOption('o',
                       'Open custom report',
                       self._jenkins_manager.select_active_report,
                       pause=False),
            MenuOption('a',
                       'Add a custom report',
                       self._jenkins_manager.add_custom_report,
                       pause=False),
            MenuOption('r',
                       'Remove a custom report',
                       self._jenkins_manager.remove_custom_report,
                       pause=False),
            MenuOption('l', 'List custom reports',
                       self._jenkins_manager.list_custom_reports),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_jenkins_menu)
        ]

        self.jenkins_report_menu = [
            MenuOption('v', 'View report',
                       self._jenkins_manager.view_custom_report),
            MenuOption('a',
                       'Add a job',
                       self._jenkins_manager.add_custom_report_job,
                       pause=False),
            MenuOption('r',
                       'Remove a job',
                       self._jenkins_manager.remove_custom_report_job,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(
                self.go_to_jenkins_reports_manager_menu)
        ]

        self.jenkins_connections_manager_menu = [
            MenuOption('o',
                       'Open connection',
                       self._jenkins_manager.select_active_connection,
                       pause=False),
            MenuOption('a',
                       'Add a connection',
                       self._jenkins_manager.add_connection,
                       pause=False),
            MenuOption('r',
                       'Remove a connection',
                       self._jenkins_manager.remove_connection,
                       pause=False),
            MenuOption('l', 'List connections',
                       self._jenkins_manager.list_connections),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_jenkins_menu)
        ]

        self.jenkins_connection_menu = [
            MenuOption('v',
                       'View cached jobs',
                       self._jenkins_manager.view_cached_jobs,
                       pause=False),
            MenuOption('d',
                       'Download jobs to cache',
                       self._jenkins_manager.download_jobs,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption('l', 'List saved views',
                       self._jenkins_manager.list_views),
            MenuOption('a',
                       'Add a view',
                       self._jenkins_manager.add_view,
                       pause=False),
            MenuOption('r',
                       'Remove a view',
                       self._jenkins_manager.remove_view,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(
                self.go_to_jenkins_connections_manager_menu)
        ]

        self.options_menu = [
            MenuOption('p', 'Change Argus password', self._change_password),
            MenuOption('b', 'Change browser', self._change_browser),
            MenuOption('v', 'Toggle Verbose/Debug', self._change_debug),
            MenuOption('d', 'Toggle Display dependencies',
                       self._change_show_dependencies),
            MenuOption('o', 'Toggle show open dependencies only',
                       self._change_dependency_type),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.projects_menu = [
            MenuOption('l',
                       'List locally cached projects',
                       self._jira_manager.list_projects,
                       pause=True),
            MenuOption('s',
                       'Search locally cached JiraIssues for a string',
                       self._jira_manager.search_projects,
                       pause=False),
            MenuOption('a',
                       'Add new JiraProject offline cache',
                       self._jira_manager.cache_new_jira_project_data,
                       pause=True),
            MenuOption(
                'd',
                'Delete offline cached ticket data for a JiraProject on a connection',
                self._jira_manager.delete_cached_jira_project),
            MenuOption('u',
                       'Update all locally cached project JIRA data',
                       self._jira_manager.update_cached_jira_project_data,
                       pause=False),
            MenuOption.print_blank_line(),
            MenuOption.return_to_previous_menu(self.go_to_main_menu)
        ]

        self.active_menu = None
        self.menu_header = None
        self.go_to_main_menu()

        self._load_config()

        # let user read startup info
        pause()
Example No. 28
import nltk
import torch
from torchvision import transforms

from src.utils import Config, get_training_data, set_timezone, tag_date_time, sentence_similarity
from src.utils import pick_random_test_image, copy_file_to_correct_folder, predict_image_caption, find_bleu_score, process_predicted_tokens
# Note: get_data_loader is used below, but its import is not shown in this snippet.

set_timezone()

if __name__ == "__main__":

    transform_test = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    config = Config("config.yaml")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    test_data_loader = get_data_loader(
        transform=transform_test,
        caption_file=config.CAPTION_FILE,
        image_id_file=config.IMAGE_ID_FILE_TEST,
        image_folder=config.IMAGE_DATA_DIR,
        config=config,
        vocab_file=config.VOCAB_FILE,
        mode="test",
    )

    # TODO #2: Specify the saved models to load.
    print(f"DEV MODE: {config.DEV_MODE}")
Example No. 29
class Attention(nn.Module):
    """Compute scaled-dot product attention of Transformer

    Attributes:
        masked_attention: whether to mask attention or not
    """
    def __init__(self,
                 masked_attention: bool = False,
                 is_base: bool = True) -> None:
        super().__init__()
        self.masked_attention = masked_attention
        self.config = Config()
        self.config.add_model(is_base)

        self.dim_q: int = self.config.model.model_params.dim_q
        self.dim_k: int = self.config.model.model_params.dim_k
        self.dim_v: int = self.config.model.model_params.dim_v
        self.dim_model: int = self.config.model.model_params.dim_model
        if self.masked_attention:
            assert (
                self.dim_k == self.dim_v
            ), "masked self-attention requires key, and value to be of the same size"
        else:
            assert (
                self.dim_q == self.dim_k == self.dim_v
            ), "self-attention requires query, key, and value to be of the same size"

        self.q_project = nn.Linear(self.dim_model, self.dim_q)
        self.k_project = nn.Linear(self.dim_model, self.dim_k)
        self.v_project = nn.Linear(self.dim_model, self.dim_v)
        self.scale = self.dim_k**-0.5

    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        attention_mask: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
            query: query embedding (batch_size, max_len, dim_model)
            key: key embedding (batch_size, max_len, dim_model)
            value: value embedding (batch_size, max_len, dim_model)
            attention_mask: used to implement masked_attention (batch_size, max_len, max_len)
        """
        if self.masked_attention:
            assert torch.equal(
                key, value
            ), "masked self-attention requires key and value to be the same tensor"
            assert (
                attention_mask
                is not None), "masked self-attention requires an attention mask"
        else:
            assert torch.equal(query, key) and torch.equal(
                key, value
            ), "self-attention requires query, key, and value to be the same tensor"

        q = self.q_project(query)  # (batch_size, max_len, dim_q)
        k = self.k_project(key)  # (batch_size, max_len, dim_k)
        v = self.v_project(value)  # (batch_size, max_len, dim_v)

        qk = (torch.bmm(q, k.transpose(1, 2)) * self.scale
              )  # (batch_size, max_len, max_len)
        qk = qk.masked_fill(qk == 0, self.config.model.train_hparams.eps)

        if self.masked_attention:
            qk = qk.masked_fill(attention_mask == 0,
                                self.config.model.train_hparams.eps)

        attention_weight = torch.softmax(qk, dim=-1)
        attention = torch.matmul(attention_weight,
                                 v)  # (batch_size, max_len, dim_v)
        return attention, attention_weight
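A usage sketch of the unmasked path; the dim_* values come from Config, so the 512 below is a transformer-base assumption rather than something this snippet guarantees:

import torch

attention = Attention(masked_attention=False)
x = torch.randn(2, 5, 512)  # (batch_size, max_len, dim_model), assuming dim_model=512
out, weights = attention(query=x, key=x, value=x)
# out: (batch_size, max_len, dim_v); each row of weights is softmax-normalized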
Example No. 30

@click.command()
@click.argument("path_to_units")
@click.argument("path_to_eez")
@click.argument("path_to_shared_coast")
@click.argument("path_to_capacities_pv_prio")
@click.argument("path_to_capacities_wind_prio")
@click.argument("path_to_electricity_yield_pv_prio")
@click.argument("path_to_electricity_yield_wind_prio")
@click.argument("path_to_eligibility_categories")
@click.argument("path_to_land_cover")
@click.argument("path_to_protected_areas")
@click.argument("path_to_result")
@click.argument("scenario")
@click.argument("config", type=Config())
def potentials(path_to_units, path_to_eez, path_to_shared_coast,
               path_to_capacities_pv_prio, path_to_capacities_wind_prio,
               path_to_electricity_yield_pv_prio,
               path_to_electricity_yield_wind_prio,
               path_to_eligibility_categories, path_to_land_cover,
               path_to_protected_areas, path_to_result, scenario, config):
    """Determine potential of renewable electricity in each administrative unit.

    * Take the (only technically restricted) raster data potentials,
    * add restrictions based on scenario definitions,
    * allocate the onshore potentials to the administrative units,
    * allocate the offshore potentials to exclusive economic zones (EEZ),
    * allocate the offshore potential of EEZ to units based on the fraction of shared coast.
    """
    with rasterio.open(path_to_eligibility_categories, "r") as src:
Example No. 31
from comet_ml import Experiment

import torch
from torchvision import transforms

from src.dataset import Places2
from src.model import PConvUNet
from src.loss import InpaintingLoss, VGG16FeatureExtractor
from src.train import Trainer
from src.utils import Config, load_ckpt, create_ckpt_dir


# set the config
config = Config("default_config.yml")
config.ckpt = create_ckpt_dir()
print("Check Point is '{}'".format(config.ckpt))

# Define the used device
device = torch.device("cuda:{}".format(config.cuda_id)
                      if torch.cuda.is_available() else "cpu")

# Define the model
print("Loading the Model...")
model = PConvUNet(finetune=config.finetune,
                  layer_size=config.layer_size)
if config.finetune:
    model.load_state_dict(torch.load(config.finetune)['model'])
model.to(device)


# Data Transformation