def cnn(tokens, msk, embedding_dim, num_filters, output_dim):
    cnn_module = CnnEncoder(embedding_dim=embedding_dim,
                            num_filters=num_filters,
                            output_dim=output_dim)
    output = cnn_module.forward(tokens=tokens, mask=msk)
    print(output.size())
    print(output)
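A minimal usage sketch for the helper above, assuming AllenNLP's CnnEncoder import path and purely illustrative tensor sizes:

import torch
from allennlp.modules.seq2vec_encoders import CnnEncoder

batch_size, num_tokens, embedding_dim = 4, 20, 300   # illustrative sizes
tokens = torch.randn(batch_size, num_tokens, embedding_dim)
msk = torch.ones(batch_size, num_tokens, dtype=torch.bool)  # recent AllenNLP expects a bool mask; older versions accept float
cnn(tokens, msk, embedding_dim=embedding_dim, num_filters=100, output_dim=64)
# expected to print torch.Size([4, 64]) followed by the encoded tensor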
Example #2
    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        final_feedforward: FeedForward,
        initializer: InitializerApplicator = InitializerApplicator(),
        regularizer: Optional[RegularizerApplicator] = None,
    ) -> None:

        super().__init__(vocab, regularizer)

        # Model components
        self._embedder = text_field_embedder
        self._feed_forward = final_feedforward

        self._cnn_claim_encoder = CnnEncoder(
            embedding_dim=self._embedder.get_output_dim(), num_filters=100)
        self._cnn_evidence_encoder = CnnEncoder(
            embedding_dim=self._embedder.get_output_dim(), num_filters=100)

        self._static_feedforward_dimension = 300
        self._static_feedforward = FeedForward(
            input_dim=self._cnn_claim_encoder.get_output_dim() * 2,
            hidden_dims=self._static_feedforward_dimension,
            num_layers=1,
            activations=Activation.by_name('relu')())

        # For accuracy and loss for training/evaluation of model
        self._accuracy = CategoricalAccuracy()
        self._loss = nn.CrossEntropyLoss()

        # Initialize weights
        initializer(self)
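With AllenNLP's default ngram_filter_sizes of (2, 3, 4, 5), each encoder above reports an output dimension of num_filters * 4 = 400, so the static feed-forward layer receives an 800-dimensional concatenation of claim and evidence features. A quick sanity check, using an illustrative embedder size:

from allennlp.modules.seq2vec_encoders import CnnEncoder

claim_encoder = CnnEncoder(embedding_dim=300, num_filters=100)  # 300 stands in for the embedder output dim
print(claim_encoder.get_output_dim())  # 400, since no output_dim projection is configured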
Example #3
 def __init__(self, input_dim, num_filters, ngrams):
     super(CharCNN, self).__init__()
     self.cnn = CnnEncoder(
         input_dim,
         num_filters,
         ngram_filter_sizes=ngrams
     )
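A sketch of using this CharCNN wrapper, with hypothetical character-embedding size and n-gram widths:

import torch

char_cnn = CharCNN(input_dim=50, num_filters=30, ngrams=(2, 3, 4))  # illustrative values
chars = torch.randn(8, 12, 50)          # (batch, num_chars, input_dim)
out = char_cnn.cnn(chars, mask=None)    # no forward() is shown above, so call the inner encoder directly
print(out.shape)                        # torch.Size([8, 90]); 90 = num_filters * len(ngrams)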
Example #4
    def __init__(self, arg):
        super(EncoderRNN3, self).__init__()
        self.IDFembeddings = nn.Embedding(arg.vocab_size, 200).cuda()
        self.IDFembeddings.weight.data.copy_(
            torch.from_numpy(np.load('../data/IDFweight.npy')))
        self.IDFembeddings.weight.data.requires_grad = False

        self.gate_state = nn.Linear(256, 512)
        self.to_gate = to_gate = nn.Linear(1024, 512)
        self.gate_output = nn.Linear(512, 512)
        self.cnnencoder = CnnEncoder(512, 64, output_dim=512).cuda()

        self.TFembeddings = nn.Embedding(50, 50).cuda()
        self.TFembeddings.weight.data.copy_(torch.from_numpy(np.eye(50)))
        self.TFembeddings.weight.data.requires_grad = False

        self.elmo_embed = enc_elmo(arg)

        self.rnn = nn.LSTM(arg.enc_elmo_lstm_dim + arg.embedding_size + 200 +
                           50,
                           arg.enc_hidden_size // 2,
                           arg.enc_num_layers,
                           batch_first=True,
                           bidirectional=True)

        for i in range(len(self.rnn.all_weights)):
            for j in range(len(self.rnn.all_weights[i])):
                try:
                    init.xavier_uniform_(self.rnn.all_weights[i][j])
                except ValueError:
                    # bias vectors are 1-D and cannot be Xavier-initialised
                    pass
        self.dropout = nn.Dropout(p=0.3, inplace=True)
Example #5
 def __init__(self, transformer: Seq2SeqEncoder, num_intents, num_tags,
              dropout):
     super().__init__()
     self._transformer = transformer
     self.num_intents = num_intents
     self.num_tags = num_tags
     hidden_size = self._transformer.get_output_dim()
     self.cnn_num_filters = 3
     self.cnn_ngram_filter_sizes = (2, )
     cnn_maxpool_output_dim = self.cnn_num_filters * len(
         self.cnn_ngram_filter_sizes)
     self._cnn_encoder = CnnEncoder(self.num_tags, self.cnn_num_filters,
                                    self.cnn_ngram_filter_sizes)
     self._feedforward = nn.Linear(transformer.get_output_dim(),
                                   hidden_size)
     self._intent_feedforward = nn.Linear(
         hidden_size + cnn_maxpool_output_dim, self.num_intents)
     self._tag_feedforward = nn.Linear(transformer.get_output_dim(),
                                       self.num_tags)
     self._norm_layer = nn.LayerNorm(self.num_tags)
     if dropout:
         self.dropout = torch.nn.Dropout(dropout)
     else:
         self.dropout = None
     torch.nn.init.xavier_uniform_(self._feedforward.weight)
     torch.nn.init.xavier_uniform_(self._intent_feedforward.weight)
     torch.nn.init.xavier_uniform_(self._tag_feedforward.weight)
     self._feedforward.bias.data.fill_(0)
     self._intent_feedforward.bias.data.fill_(0)
     self._tag_feedforward.bias.data.fill_(0)
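Here the CnnEncoder runs over the per-token tag logits, treating num_tags as the "embedding" dimension; with 3 filters and a single n-gram width its max-pooled output is 3-dimensional, which is what cnn_maxpool_output_dim computes before being concatenated onto the transformer representation for intent classification. A shape check with illustrative sizes:

import torch
from allennlp.modules.seq2vec_encoders import CnnEncoder

num_tags, seq_len, batch = 10, 16, 2     # illustrative
tag_logits = torch.randn(batch, seq_len, num_tags)
encoder = CnnEncoder(num_tags, 3, (2,))
print(encoder(tag_logits, mask=None).shape)   # torch.Size([2, 3])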
Example #6
class GatherCNN(torch.nn.Module):
    def __init__(self, input_dim, num_filters, output_dim):
        super().__init__()
        self.output_dim = output_dim
        self.cnn_module = CnnEncoder(embedding_dim=input_dim,
                                     num_filters=num_filters,
                                     output_dim=output_dim)

    def forward(self, tokens, msk=None):
        """

        :param tokens: batch_size, num_tokens, embedding_dim
        :param msk:
        :return:
        """
        output = self.cnn_module.forward(tokens=tokens, mask=msk)
        return output

    def get_output_dim(self):
        return self.output_dim
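A usage sketch for GatherCNN with illustrative shapes; because output_dim is supplied, the encoder projects its pooled features down to that size:

import torch

gather = GatherCNN(input_dim=128, num_filters=64, output_dim=32)  # illustrative sizes
tokens = torch.randn(4, 25, 128)                 # (batch_size, num_tokens, embedding_dim)
mask = torch.ones(4, 25, dtype=torch.bool)
print(gather(tokens, mask).shape)                # torch.Size([4, 32])
print(gather.get_output_dim())                   # 32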
Example #7
def initialize_model(args, vocab, custom_model=None):
    word_embeddings = load_embedding(args, vocab)
    model_type = args.model if not custom_model else custom_model
    if "RNN" in args.model:
        encoder = PytorchSeq2VecWrapper(
            torch.nn.LSTM(args.embedding_dim,
                          hidden_size=args.hidden_dim,
                          num_layers=1,
                          batch_first=True))
    elif "CNN" in args.model:
        encoder = CnnEncoder(args.embedding_dim,
                             num_filters=15,
                             ngram_filter_sizes=(2, 3, 4),
                             conv_layer_activation=torch.nn.ReLU())
    model = GenericClassifier(word_embeddings,
                              encoder,
                              vocab,
                              args.num_class,
                              trapdoor=None,
                              trapdoor_class=args.target_labels[0],
                              smooth_eps=args.smooth_eps)
    model.cuda()
    model.train().cuda()  # cuDNN RNN backward can only be called in training mode
    return model, encoder, word_embeddings
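In the CNN branch above, the encoder's output dimension is num_filters * len(ngram_filter_sizes) = 15 * 3 = 45, which GenericClassifier has to accept as its feature size. A standalone check with an illustrative embedding dimension:

import torch
from allennlp.modules.seq2vec_encoders import CnnEncoder

encoder = CnnEncoder(100, num_filters=15, ngram_filter_sizes=(2, 3, 4),
                     conv_layer_activation=torch.nn.ReLU())
print(encoder.get_output_dim())   # 45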
Example #8
class FEVERTextClassificationModel(Model):
    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        final_feedforward: FeedForward,
        initializer: InitializerApplicator = InitializerApplicator(),
        regularizer: Optional[RegularizerApplicator] = None,
    ) -> None:

        super().__init__(vocab, regularizer)

        # Model components
        self._embedder = text_field_embedder
        self._feed_forward = final_feedforward

        self._cnn_claim_encoder = CnnEncoder(
            embedding_dim=self._embedder.get_output_dim(), num_filters=100)
        self._cnn_evidence_encoder = CnnEncoder(
            embedding_dim=self._embedder.get_output_dim(), num_filters=100)

        self._static_feedforward_dimension = 300
        self._static_feedforward = FeedForward(
            input_dim=self._cnn_claim_encoder.get_output_dim() * 2,
            hidden_dims=self._static_feedforward_dimension,
            num_layers=1,
            activations=Activation.by_name('relu')())

        # For accuracy and loss for training/evaluation of model
        self._accuracy = CategoricalAccuracy()
        self._loss = nn.CrossEntropyLoss()

        # Initialize weights
        initializer(self)

    def forward(
            self,
            claim: Dict[str, torch.LongTensor],
            evidence: Dict[str, torch.LongTensor],
            label: torch.IntTensor = None,
            metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Parameters
        ----------
        claim : Dict[str, torch.LongTensor]
            From a ``TextField``.
            The LongTensor shape is typically ``(batch_size, sent_length)``.
        evidence : Dict[str, torch.LongTensor]
            From a ``TextField``.
            The LongTensor shape is typically ``(batch_size, sent_length)``.
        label : torch.IntTensor, optional, (default = None)
            From a ``LabelField``
        metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            Metadata containing the original tokenization of the claim and
            evidence sentences under the 'claim_tokens' and 'evidence_tokens' keys respectively.
        Returns
        -------
        An output dictionary consisting of:

        label_logits : torch.FloatTensor
            A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
            probabilities of the entailment label.
        label_probs : torch.FloatTensor
            A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
            entailment label.
        loss : torch.FloatTensor, optional
            A scalar loss to be optimised.
        """

        embedded_claim = self._embedder(claim)
        embedded_evidence = self._embedder(evidence)

        cnn_claim_features = self._cnn_claim_encoder.forward(
            embedded_claim, get_text_field_mask(claim))
        cnn_evidence_features = self._cnn_evidence_encoder.forward(
            embedded_evidence, get_text_field_mask(evidence))

        input_embeddings = torch.cat(
            (cnn_claim_features, cnn_evidence_features), dim=1)

        layer_x = self._static_feedforward(input_embeddings)

        label_logits = self._feed_forward(layer_x)

        label_probs = F.softmax(label_logits, dim=-1)

        output_dict = {
            "label_logits": label_logits,
            "label_probs": label_probs
        }

        if label is not None:
            loss = self._loss(label_logits, label.long().view(-1))
            self._accuracy(label_logits, label)
            output_dict["loss"] = loss

        if metadata is not None:
            output_dict["claim_tokens"] = [x["claim_tokens"] for x in metadata]
            output_dict["evidence_tokens"] = [
                x["evidence_tokens"] for x in metadata
            ]

        return output_dict

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        return {
            'accuracy': self._accuracy.get_metric(reset),
        }
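When constructing this model, the final_feedforward argument has to map the 300-dimensional output of the static feed-forward layer to one logit per label. A sketch, assuming the usual three FEVER classes:

from allennlp.modules import FeedForward
from allennlp.nn import Activation

final_feedforward = FeedForward(input_dim=300,    # matches _static_feedforward_dimension
                                num_layers=1,
                                hidden_dims=3,    # e.g. SUPPORTS / REFUTES / NOT ENOUGH INFO
                                activations=Activation.by_name('linear')())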
Example #9
    def __init__(self,
                 vocab: Vocabulary,
                 use_fp16,
                 text_field_embedder: TextFieldEmbedder,
                 transformer: Seq2SeqEncoder,
                 label_namespace: str = "labels",
                 tag_namespace: str = "tags",
                 label_encoding: Optional[str] = None,
                 include_start_end_transitions: bool = True,
                 constrain_crf_decoding: bool = None,
                 calculate_span_f1: bool = None,
                 calculate_intent_f1: bool = None,
                 dropout: Optional[float] = None,
                 wait_user_input=False,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self.label_namespace = label_namespace
        self.tag_namespace = tag_namespace
        self._use_fp16 = use_fp16
        self._verbose_metrics = verbose_metrics
        self._text_field_embedder = text_field_embedder
        self._transformer = transformer
        self.num_intents = self.vocab.get_vocab_size(label_namespace)
        self.num_tags = self.vocab.get_vocab_size(tag_namespace)
        self.cnn_num_filters = 3
        self.cnn_ngram_filter_sizes = (2, 3)
        cnn_maxpool_output_dim = self.cnn_num_filters * len(
            self.cnn_ngram_filter_sizes)
        self.cnn_encoder = CnnEncoder(self.num_tags, self.cnn_num_filters,
                                      self.cnn_ngram_filter_sizes)
        hidden_size = transformer.get_output_dim()
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None

        if constrain_crf_decoding is None:
            constrain_crf_decoding = label_encoding is not None
        if calculate_span_f1 is None:
            calculate_span_f1 = label_encoding is not None

        self.label_encoding = label_encoding
        if constrain_crf_decoding:
            if not label_encoding:
                raise ConfigurationError("constrain_crf_decoding is True, but "
                                         "no label_encoding was specified.")
            tag_labels = self.vocab.get_index_to_token_vocabulary(
                tag_namespace)
            constraints = allowed_transitions(label_encoding, tag_labels)
        else:
            constraints = None

        self.include_start_end_transitions = include_start_end_transitions
        self.crf = ConditionalRandomField(
            self.num_tags,
            constraints,
            include_start_end_transitions=include_start_end_transitions)
        self._feedforward = nn.Linear(transformer.get_output_dim(),
                                      hidden_size)
        self._intent_feedforward = nn.Linear(
            hidden_size + cnn_maxpool_output_dim, self.num_intents)
        self._tag_feedforward = nn.Linear(transformer.get_output_dim(),
                                          self.num_tags)
        self._norm_layer = nn.LayerNorm(transformer.get_output_dim())
        torch.nn.init.xavier_uniform_(self._feedforward.weight)
        torch.nn.init.xavier_uniform_(self._intent_feedforward.weight)
        torch.nn.init.xavier_uniform_(self._tag_feedforward.weight)
        self._feedforward.bias.data.fill_(0)
        self._intent_feedforward.bias.data.fill_(0)
        self._tag_feedforward.bias.data.fill_(0)
        self._intent_accuracy = CategoricalAccuracy()
        self._intent_accuracy_3 = CategoricalAccuracy(top_k=3)
        self.metrics = {
            "slot_acc": CategoricalAccuracy(),
            "slot_acc3": CategoricalAccuracy(top_k=3)
        }
        self._intent_loss = torch.nn.CrossEntropyLoss()
        self.calculate_span_f1 = calculate_span_f1
        self.calculate_intent_f1 = calculate_intent_f1
        if calculate_span_f1:
            if not label_encoding:
                raise ConfigurationError("calculate_span_f1 is True, but "
                                         "no label_encoding was specified.")
            self._f1_metric = SpanBasedF1Measure(vocab,
                                                 tag_namespace=tag_namespace,
                                                 label_encoding=label_encoding)
        if self._use_fp16:
            self.half()
        # for name, p in self.named_parameters():
        #     print(name, p.size())
        initializer(self)
        if wait_user_input:
            input("Press Enter to continue...")
Example #10
def main():
	###############################################################################################
	prepare_global_logging(serialization_dir=args.serialization_dir, file_friendly_logging=False)
	#DATA
	reader = MathDatasetReader(source_tokenizer=CharacterTokenizer(),
	                        target_tokenizer=CharacterTokenizer(),
	                        source_token_indexers={'tokens': SingleIdTokenIndexer(namespace='tokens')},
	                        target_token_indexers={'tokens': SingleIdTokenIndexer(namespace='tokens')},
	                        target=False,
	                        label=True,
	                        lazy=False)
	# train_data = reader.read("../../datasets/math/label-data/train-all")
	# val_data = reader.read("../../datasets/math/label-data/interpolate")
	val_data = reader.read("./generate_files")


	vocab = Vocabulary()
	vocab.add_tokens_to_namespace([START_SYMBOL, END_SYMBOL, ' ', '!', "'", '(', ')', '*', '+', ',', '-', '.', '/',
	                                    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '<', '=', '>', '?',
	                                    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
	                                    'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b',
	                                    'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',
	                                    'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '}'], namespace='tokens')
	vocab.add_tokens_to_namespace(['algebra', 'arithmetic', 'calculus', 'comparison',
	  								 'measurement', 'numbers', 'polynomials', 'probability'], namespace='labels')



	# MODEL
	embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
	                             embedding_dim=EMBEDDING_DIM)
	source_embedder = BasicTextFieldEmbedder({"tokens": embedding})

	if args.model == 'lstm':
		encoder = PytorchSeq2VecWrapper(torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, 
											num_layers=NUM_LAYERS, batch_first=True))
	elif args.model == 'cnn':
		encoder = CnnEncoder(embedding_dim=EMBEDDING_DIM, num_filters=NUM_FILTERS, output_dim=HIDDEN_DIM)
	else:
		raise NotImplementedError("The classifier model should be LSTM or CNN")


	model = TextClassifier(vocab=vocab,
				source_text_embedder=source_embedder,
	            encoder=encoder,
	            )
	model.to(device)


	if not Path(args.serialization_dir).exists() or not Path(args.serialization_dir).is_dir():
  		raise NotImplementedError("The model seems not to exist")
	with open(Path(args.serialization_dir) / "best.th", "rb") as model_path:
  		model_state = torch.load(model_path, map_location=nn_util.device_mapping(-1))
  		model.load_state_dict(model_state)
	model.eval()

	predictor = TextClassifierPredictor(model, dataset_reader=reader)

	# TEST
	correct = 0
	total = 0

	pbar = tqdm(val_data)
	batch_instance = list()
	batch_gt = list()

	for instance in pbar:
		batch_instance.append(instance)
		batch_gt.append(instance.fields["labels"].label)  # str
		if len(batch_instance) == BATCH_SIZE:
			outputs = predictor.predict(batch_instance)
			for i, output in enumerate(outputs):
				if batch_gt[i] == output['predict_labels']:
					correct += 1
				total += 1
			batch_instance = list()
			batch_gt = list()
			pbar.set_description("correct/total %.3f" % (correct / total))

	# evaluate the final partial batch, if any
	if batch_instance:
		outputs = predictor.predict(batch_instance)
		for i, output in enumerate(outputs):
			if batch_gt[i] == output['predict_labels']:
				correct += 1
			total += 1
		print("correct/total %.3f" % (correct / total))
Example #11
def main():
    ###############################################################################################
    prepare_global_logging(serialization_dir=args.serialization_dir,
                           file_friendly_logging=False)
    #DATA
    reader = MathDatasetReader(source_tokenizer=CharacterTokenizer(),
                               target_tokenizer=CharacterTokenizer(),
                               source_token_indexers={
                                   'tokens':
                                   SingleIdTokenIndexer(namespace='tokens')
                               },
                               target_token_indexers={
                                   'tokens':
                                   SingleIdTokenIndexer(namespace='tokens')
                               },
                               target=False,
                               label=True,
                               lazy=True)
    train_data = reader.read("../../datasets/math/label-data/train-all")
    # val_data = reader.read("../../datasets/math/label-data/interpolate")

    vocab = Vocabulary()
    vocab.add_tokens_to_namespace([
        START_SYMBOL, END_SYMBOL, ' ', '!', "'", '(', ')', '*', '+', ',', '-',
        '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '<',
        '=', '>', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
        'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
        'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
        'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{',
        '}'
    ],
                                  namespace='tokens')
    vocab.add_tokens_to_namespace([
        'algebra', 'arithmetic', 'calculus', 'comparison', 'measurement',
        'numbers', 'polynomials', 'probability'
    ],
                                  namespace='labels')

    # MODEL
    embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),
                          embedding_dim=EMBEDDING_DIM)
    source_embedder = BasicTextFieldEmbedder({"tokens": embedding})

    if args.model == 'lstm':
        encoder = PytorchSeq2VecWrapper(
            torch.nn.LSTM(EMBEDDING_DIM,
                          HIDDEN_DIM,
                          num_layers=NUM_LAYERS,
                          batch_first=True))
    elif args.model == 'cnn':
        encoder = CnnEncoder(embedding_dim=EMBEDDING_DIM,
                             num_filters=NUM_FILTERS,
                             output_dim=HIDDEN_DIM)
    else:
        raise NotImplementedError("The classifier model should be LSTM or CNN")

    model = TextClassifier(
        vocab=vocab,
        source_text_embedder=source_embedder,
        encoder=encoder,
    )
    model.to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=1e-3,
                           betas=(0.9, 0.995),
                           eps=1e-6)

    train_iterator = BucketIterator(batch_size=BATCH_SIZE,
                                    max_instances_in_memory=1024,
                                    sorting_keys=[("source_tokens",
                                                   "num_tokens")])
    train_iterator = MultiprocessIterator(train_iterator, num_workers=16)
    train_iterator.index_with(vocab)

    val_iterator = BucketIterator(batch_size=BATCH_SIZE,
                                  max_instances_in_memory=1024,
                                  sorting_keys=[("source_tokens", "num_tokens")
                                                ])
    val_iterator = MultiprocessIterator(val_iterator, num_workers=16)
    val_iterator.index_with(vocab)
    #pdb.set_trace()

    LR_SCHEDULER = {"type": "exponential", "gamma": 0.5, "last_epoch": -1}
    lr_scheduler = LearningRateScheduler.from_params(optimizer,
                                                     Params(LR_SCHEDULER))

    # TRAIN
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      iterator=train_iterator,
                      validation_iterator=None,
                      train_dataset=train_data,
                      validation_dataset=None,
                      patience=None,
                      shuffle=True,
                      num_epochs=1,
                      summary_interval=100,
                      learning_rate_scheduler=lr_scheduler,
                      cuda_device=CUDA_DEVICES,
                      grad_norm=5,
                      grad_clipping=5,
                      model_save_interval=600,
                      serialization_dir=args.serialization_dir,
                      keep_serialized_model_every_num_seconds=3600,
                      should_log_parameter_statistics=True,
                      should_log_learning_rate=True)
    trainer.train()
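Both this training script and the evaluation script in Example #10 rely on module-level constants and a parsed args object that are not shown; a plausible configuration, purely as an illustration:

EMBEDDING_DIM = 128
HIDDEN_DIM = 256
NUM_LAYERS = 2
NUM_FILTERS = 100
BATCH_SIZE = 64
CUDA_DEVICES = 0        # GPU index, or -1 for CPU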
Example #12
    def __init__(self,
                 vocab: Vocabulary,
                 query_field_embedder: TextFieldEmbedder,
                 doc_field_embedder: TextFieldEmbedder,
                 doc_transformer: FeedForward,
                 query_transformer: FeedForward,
                 scorer: FeedForward,
                 total_scorer: FeedForward,
                 doc_encoder: Seq2VecEncoder = BagOfEmbeddingsEncoder(50),
                 query_encoder: Seq2VecEncoder = BagOfEmbeddingsEncoder(50),
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 aqwv_corrections: Optional[str] = None,
                 predicting: Optional[bool] = False) -> None:
        super(LeToRWrapper, self).__init__(vocab, regularizer)

        self.query_field_embedder = query_field_embedder
        self.doc_field_embedder = doc_field_embedder
        self.document_transformer = doc_transformer
        self.query_transformer = query_transformer

        self.scorer = scorer
        self.total_scorer = total_scorer
        #self.scorer_norm = nn.BatchNorm1d(1)
        self.doc_encoder = doc_encoder
        self.doc_norm = nn.BatchNorm1d(50)
        self.score_norm = nn.BatchNorm1d(3)
        self.query_encoder = query_encoder
        #self.query_norm = nn.BatchNorm1d(100)
        self.initializer = initializer
        self.regularizer = regularizer
        #self.score_dropout = nn.Dropout(0.05)
        self.emb_dropout = nn.Dropout(0.2)

        self.encoder = CnnEncoder(embedding_dim=1,
                                  num_filters=20,
                                  output_dim=1)
        #self.doc_encoder = CnnEncoder(embedding_dim=50, num_filters=20, output_dim=50)

        if not predicting:
            self.metrics = {
                'accuracy':
                CategoricalAccuracy(),
                'aqwv_2':
                AQWV(corrections_file=aqwv_corrections,
                     cutoff=2,
                     version='program'),
                #'aqwv_3': AQWV(corrections_file=aqwv_corrections, cutoff=3, version='program')
            }

            self.training_metrics = {
                True: ['accuracy'],
                #False: ['accuracy']
                False: ['aqwv_2']  #, 'aqwv_3']
            }
        else:
            self.metrics, self.training_metrics = {}, {True: [], False: []}

        #self.loss = nn.MarginRankingLoss(margin=0.5)
        self.loss = nn.CrossEntropyLoss()
        initializer(self)
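The embedding_dim=1 encoder above treats a sequence of scalar scores as one-dimensional token embeddings and pools it down to a single value per instance. A shape sketch with illustrative sizes:

import torch
from allennlp.modules.seq2vec_encoders import CnnEncoder

scores = torch.randn(8, 30, 1)        # (batch, num_scores, 1), illustrative
encoder = CnnEncoder(embedding_dim=1, num_filters=20, output_dim=1)
print(encoder(scores, mask=None).shape)   # torch.Size([8, 1])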