Beispiel #1
0
 def __init__(self, configs):
     """Build the encoder/decoder/postnet stack from a config triple.

     Args:
         configs: ``(preprocess_cfg, model_cfg, train_cfg)`` tuple; the
             training config is not needed at construction time.
     """
     super().__init__()
     preprocess_cfg, model_cfg, _train_cfg = configs

     self.encoder = Encoder(model_cfg)
     self.decoder = Decoder(preprocess_cfg, model_cfg)
     self.postnet = PostNet()
Beispiel #2
0
    def __init__(self,
                 text_vocab: Vocab,
                 tag_vocab: TagVocab,
                 embed_dim,
                 hidden_mode,
                 hidden_dim,
                 hidden_layers,
                 atten_heads=1,
                 num_blocks=1,
                 dropout=0.2):
        """Embedding + time signal -> encoder -> partially-observed CRF."""
        super(Tagger, self).__init__()

        # Keep both vocabularies around for later decoding/printing.
        self.text_vocab = text_vocab
        self.tag_vocab = tag_vocab

        pad_index = text_vocab.stoi['<pad>']
        self.embed = nn.Embedding(len(text_vocab),
                                  embed_dim,
                                  padding_idx=pad_index)
        self.time_signal = TimeSignal(embed_dim)

        self.encoder = Encoder(embed_dim,
                               hidden_mode,
                               hidden_dim,
                               hidden_layers,
                               atten_heads=atten_heads,
                               num_blocks=num_blocks,
                               dropout=dropout)

        # CRF layer constrained by begin/end/transition masks from the tag vocab.
        self.crf = PartialCRF(hidden_dim,
                              len(tag_vocab),
                              tag_vocab.begin_constraints,
                              tag_vocab.end_constraints,
                              tag_vocab.transition_constraints,
                              dropout)
Beispiel #3
0
    def __init__(self, master):
        """Assemble the three-column control GUI and attach the serial link.

        A serial device path may be supplied as the first command-line
        argument; when it is missing or cannot be opened the app keeps
        running without a serial port.

        Args:
            master: parent Tk widget hosting the three Frame columns.
        """
        self.userCallsign = ''
        self.moduleCallsign = ''
        self.encoder = Encoder()

        firstCol = Frame(master)
        secCol = Frame(master)
        thdCol = Frame(master)

        # receive flag to avoid loopback
        self.receiveFlag = True

        self.lpbckpnl = LoopBackPanel(self, firstCol)
        self.bustMode = USBCam(self, firstCol)
        # NOTE(review): this rebinding discards the USBCam reference created
        # just above — confirm whether TCPConnection was meant to live in its
        # own attribute.
        self.bustMode = TCPConnection(firstCol)

        self.datCon = DataControl(self, secCol)
        self.tle = TLE(self, secCol)
        self.pas = PAS(self, secCol)
        self.incPack = IncomingPackets(thdCol)

        self.applySerial = False
        self.serial = ''  # blank object not null

        try:
            self.serial = serial.Serial(sys.argv[1])
            self.applySerial = True
        except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt alive
            print('No serial connected')

        firstCol.grid(row=0,
                column=0,
                sticky='nswe',
                padx=20,
                pady=20)

        secCol.grid(row=0,
                column=1,
                sticky='nswe',
                padx=10,
                pady=20)

        thdCol.grid(row=0,
                column=2,
                sticky='nswe',
                padx=10,
                pady=20)

        self.cparse = Telecommand(self, self.userCallsign)

        if self.applySerial:
            self.serial.setRTS(False)
Beispiel #4
0
class MyModel(nn.Module):
    """Encoder -> decoder -> postnet sequence-to-spectrogram model."""

    def __init__(self, configs):
        """configs is the (preprocess_cfg, model_cfg, train_cfg) triple."""
        super().__init__()
        preprocess_cfg, model_cfg, _train_cfg = configs

        self.encoder = Encoder(model_cfg)
        self.decoder = Decoder(preprocess_cfg, model_cfg)
        self.postnet = PostNet()

    def forward(self, text, text_lens, mel, gate, mel_lens):
        """Teacher-forced forward pass.

        Shapes:
            text: (batch, max_text_len)
            text_lens: (batch)
            mel: (batch, 80, max_mel_len)
            gate: (batch, max_mel_len)
            mel_lens: (batch)
        """
        memory, processed_memory = self.encoder(text, text_lens)
        mel_pred, gate_pred, alignment = self.decoder(
            memory, processed_memory, text_lens, mel)
        mel_pred_postnet = self.postnet(mel_pred) + mel_pred
        # Masking of padded frames was left disabled in the original:
        # mask = get_mask_from_lengths(mel_lens)  # (batch, max_mel_len)
        # mel_pred = mel_pred.masked_fill(mask.unsqueeze(1), 0.0)
        # mel_pred_postnet = mel_pred_postnet.masked_fill(mask.unsqueeze(1), 0.0)
        # gate_pred = gate_pred.masked_fill(mask, 1e3)
        return mel_pred, mel_pred_postnet, gate_pred, alignment

    def inference(self, text):
        """Free-running pass; text: (batch, max_text_len)."""
        memory, processed_memory = self.encoder.inference(text)
        mel_pred, gate_pred, alignment = self.decoder.inference(
            memory, processed_memory)
        mel_pred_postnet = self.postnet(mel_pred) + mel_pred
        return mel_pred, mel_pred_postnet, gate_pred, alignment
Beispiel #5
0
def get_encoder(args,
                log_name,
                legacy_path,
                path,
                dataloader_list,
                device='cpu',
                encoder_type='vae'):
    """Load (or train) the requested encoder and register it with wandb.

    Args:
        args: namespace providing at least ``input_height`` and
            ``digital_dataset`` (only used when a VAE must be trained).
        log_name: run/log identifier forwarded to ``train_vae``.
        legacy_path: optional state-dict path for a plain pre-trained Encoder.
        path: optional Lightning checkpoint path for ``DFC_VAE``.
        dataloader_list: dataloaders forwarded to ``train_vae``.
        device: torch device string, e.g. 'cpu' or 'cuda'.
        encoder_type: 'vae' or 'resnet50'.

    Returns:
        The encoder module, moved to ``device``.

    Raises:
        NameError: if ``encoder_type`` is neither 'vae' nor 'resnet50'.
    """
    if encoder_type == 'vae':
        print('Loading the variational autoencoder')
        if legacy_path:
            # Legacy route: a plain Encoder with a saved state dict.
            encoder = Encoder().to(device)
            encoder.load_state_dict(
                torch.load(legacy_path, map_location=device))
        else:
            if path:
                model = DFC_VAE.load_from_checkpoint(path).to(device)
            else:
                # No checkpoint at all: train the VAE from scratch.
                model = train_vae(args,
                                  log_name,
                                  dataloader_list,
                                  args.input_height,
                                  is_digit_dataset=args.digital_dataset,
                                  device=device).to(device)
            encoder = model.encoder
    elif encoder_type == 'resnet50':  # Maybe fine tune resnet50 here
        print('Loading the RESNET50 encoder')
        encoder = models.resnet50(pretrained=True, progress=True)

        # Freeze the backbone; only explicitly re-enabled layers would train.
        set_parameter_requires_grad(encoder, req_grad=False)
        # encoder.fc = nn.Linear(1000, args.dfc_hidden_dim) #TODO: Reshape and finetune resnet50
        # get_update_param(encoder)
        encoder = encoder.to(device)
        # encoder, val_acc_history = train_last_layer_resnet50( #train for the 31 classes
        # encoder, dataloader_list, log_name=log_name, device=device, args=args, num_classes=args.dfc_hidden_dim)

    else:
        # Fixed typo in the user-facing message ('unvalid' -> 'invalid').
        # NOTE(review): ValueError would be the conventional type here, but
        # NameError is kept so existing callers' except clauses still match.
        raise NameError('The encoder_type variable has an invalid value')
    wandb.watch(encoder)
    return encoder
Beispiel #6
0
    def __init__(self,
                 space_dim=2,
                 embed_hidden=128,
                 enc_stacks=3,
                 ff_hidden=512,
                 enc_heads=16,
                 query_hidden=360,
                 att_hidden=256,
                 crit_hidden=256,
                 n_history=3,
                 p_dropout=0.1):
        """Actor-critic agent: embedding + encoder + decoder form the actor.

        Args:
            space_dim (int, optional): Dimension of the city coordinates.
                Defaults to 2.
            embed_hidden (int, optional): Embedding hidden size. Defaults to 128.
            enc_stacks (int, optional): Number of encoder layers. Defaults to 3.
            ff_hidden (int, optional): Hidden size of the encoder's FF sublayer.
                Defaults to 512.
            enc_heads (int, optional): Number of encoder attention heads.
                Defaults to 16.
            query_hidden (int, optional): Query hidden size. Defaults to 360.
            att_hidden (int, optional): Attention hidden size. Defaults to 256.
            crit_hidden (int, optional): Critic hidden size. Defaults to 256.
            n_history (int, optional): Decoder memory size. Defaults to 3.
            p_dropout (float, optional): Dropout rate. Defaults to 0.1.
        """
        super().__init__()

        # --- actor: embed coordinates, encode, then decode with history ---
        self.embedding = Embedding(in_dim=space_dim, out_dim=embed_hidden)
        self.encoder = Encoder(num_layers=enc_stacks,
                               n_hidden=embed_hidden,
                               ff_hidden=ff_hidden,
                               num_heads=enc_heads,
                               p_dropout=p_dropout)
        self.decoder = Decoder(n_hidden=embed_hidden,
                               att_dim=att_hidden,
                               query_dim=query_hidden,
                               n_history=n_history)

        # --- critic: value estimate over the same hidden size ---
        self.critic = Critic(n_hidden=embed_hidden,
                             att_hidden=att_hidden,
                             crit_hidden=crit_hidden)
 def __init__(self,
              voca_size,
              emb_dim=256,
              enc_hidden_dim=128,
              proj_dim=128,
              num_mel=80,
              dec_hidden_dim=256,
              reduction_factor=5,
              num_freq=1024):
     """Tacotron: encoder, mel decoder and post-processing network.

     Args:
         voca_size: vocabulary size for the character embedding.
         emb_dim: character embedding dimension.
         enc_hidden_dim: encoder hidden size (also used by post-processing).
         proj_dim: encoder projection size.
         num_mel: number of mel bins produced by the decoder.
         dec_hidden_dim: decoder hidden size.
         reduction_factor: frames predicted per decoder step.
         num_freq: linear-spectrogram size of the post-processing output.
     """
     super(Tacotron, self).__init__()
     # Bug fix: the embedding size was hard-coded as emb_dim=256 here,
     # silently ignoring the constructor's emb_dim argument; forward the
     # parameter instead (default is still 256, so existing calls behave
     # the same).
     self.encoder = Encoder(voca_size=voca_size,
                            emb_dim=emb_dim,
                            hidden_dim=enc_hidden_dim,
                            proj_dim=proj_dim)
     self.mel_decoder = Mel_Decoder(num_mel=num_mel,
                                    hidden_dim=dec_hidden_dim,
                                    reduction_factor=reduction_factor)
     self.post_processing = Post_processing(hidden_dim=enc_hidden_dim,
                                            proj_dim=num_mel,
                                            num_freq=num_freq)
Beispiel #8
0
def main(args):
    """End-to-end training driver for the RNN-based seq2seq translator.

    Loads a pickled pre-indexed corpus (hanja -> korean), builds train/valid
    datasets and dataloaders, instantiates an Encoder/Decoder Seq2Seq model,
    then alternates train/valid phases for args.num_epoch epochs. Per-epoch
    training losses go to CSV files under ./rnn_based/save/ and the model is
    checkpointed whenever validation loss improves. Teacher forcing starts
    at 1.0 and is multiplied by 0.9 after each epoch.
    """
    # Setting
    warnings.simplefilter("ignore", UserWarning)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Data Loading
    print('Data loading and data spliting...')
    with open(args.data_path, 'rb') as f:
        # NOTE(review): pickle.load on an external file — only run this on
        # trusted corpora.
        data = pickle.load(f)
        src_word2id = data['hanja_word2id']
        src_vocab = [k for k in src_word2id.keys()]
        trg_word2id = data['korean_word2id']
        trg_vocab = [k for k in trg_word2id.keys()]
        train_src_list = data['train_hanja_indices']
        train_trg_list = data['train_korean_indices']
        # NOTE(review): the *_add_hanja lists are loaded but never used below.
        train_add_hanja = data['train_additional_hanja_indices']
        valid_src_list = data['valid_hanja_indices']
        valid_trg_list = data['valid_korean_indices']
        valid_add_hanja = data['valid_additional_hanja_indices']

        src_vocab_num = len(src_vocab)
        trg_vocab_num = len(trg_vocab)

        # Free the raw dict early; the index lists extracted above are kept.
        del data
    print('Done!')

    # Dataset & Dataloader setting
    dataset_dict = {
        'train':
        CustomDataset(train_src_list,
                      train_trg_list,
                      mask_idx=args.mask_idx,
                      min_len=args.min_len,
                      src_max_len=args.src_max_len,
                      trg_max_len=args.trg_max_len),
        'valid':
        CustomDataset(valid_src_list,
                      valid_trg_list,
                      mask_idx=args.mask_idx,
                      min_len=args.min_len,
                      src_max_len=args.src_max_len,
                      trg_max_len=args.trg_max_len)
    }
    dataloader_dict = {
        'train':
        DataLoader(dataset_dict['train'],
                   collate_fn=PadCollate(),
                   drop_last=True,
                   batch_size=args.batch_size,
                   shuffle=True,
                   pin_memory=True),
        'valid':
        DataLoader(dataset_dict['valid'],
                   collate_fn=PadCollate(),
                   drop_last=True,
                   batch_size=args.batch_size,
                   shuffle=True,
                   pin_memory=True)
    }
    print(
        f"Total number of trainingsets  iterations - {len(dataset_dict['train'])}, {len(dataloader_dict['train'])}"
    )

    # Model Setting
    print("Instantiating models...")
    encoder = Encoder(src_vocab_num,
                      args.embed_size,
                      args.hidden_size,
                      n_layers=args.n_layers,
                      pad_idx=args.pad_idx,
                      dropout=args.dropout,
                      embedding_dropout=args.embedding_dropout)
    decoder = Decoder(args.embed_size,
                      args.hidden_size,
                      trg_vocab_num,
                      n_layers=args.n_layers,
                      pad_idx=args.pad_idx,
                      dropout=args.dropout,
                      embedding_dropout=args.embedding_dropout)
    seq2seq = Seq2Seq(encoder, decoder, device)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  seq2seq.parameters()),
                           lr=args.lr,
                           weight_decay=args.w_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.lr_decay_step,
                                          gamma=args.lr_decay)
    #criterion = nn.CrossEntropyLoss(ignore_index=args.pad_idx)
    # NOTE(review): clip_grad_norm_ is called once here, before any backward
    # pass, so it clips nothing. Gradient clipping normally runs inside the
    # training loop between loss.backward() and optimizer.step() — confirm
    # whether clipping was intended per step.
    torch_utils.clip_grad_norm_(seq2seq.parameters(), args.grad_clip)
    print(seq2seq)

    print('Model train start...')
    best_val_loss = None
    seq2seq.to(device)
    teacher_forcing_ratio = 1.0
    if not os.path.exists('./rnn_based/save'):
        os.mkdir('./rnn_based/save')
    for e in range(args.num_epoch):
        start_time_e = time.time()
        for phase in ['train', 'valid']:
            if phase == 'train':
                seq2seq.train()
            if phase == 'valid':
                seq2seq.eval()
                val_loss = 0
            total_loss_list = list()
            # Start just below the print threshold so the first batch logs.
            freq = args.print_freq - 1
            for (src, trg, _, _) in tqdm(dataloader_dict[phase]):
                # Sourcen, Target sentence setting
                src = src.transpose(0, 1).to(device)
                trg = trg.transpose(0, 1).to(device)

                # Optimizer setting
                optimizer.zero_grad()

                # Model / Calculate loss
                with torch.set_grad_enabled(phase == 'train'):
                    # No teacher forcing during validation.
                    teacher_forcing_ratio_ = teacher_forcing_ratio if phase == 'train' else 0
                    output = seq2seq(
                        src, trg, teacher_forcing_ratio=teacher_forcing_ratio_)
                    # NOTE(review): output_flat/trg_flat are computed but never
                    # used — the loss below re-flattens from scratch.
                    output_flat = output[1:].view(-1, trg_vocab_num)
                    trg_flat = trg[1:].contiguous().view(-1)
                    #loss = criterion(output_flat, trg_flat)
                    # Skip position 0 (the start token) when scoring.
                    loss = F.cross_entropy(
                        output[1:].transpose(0, 1).contiguous().view(
                            -1, trg_vocab_num),
                        trg[1:].transpose(0, 1).contiguous().view(-1),
                        ignore_index=args.pad_idx)
                    if phase == 'valid':
                        val_loss += loss.item()

                # If phase train, then backward loss and step optimizer and scheduler
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                    # Print loss value only training
                    freq += 1
                    if freq == args.print_freq:
                        total_loss = loss.item()
                        # pp = perplexity = exp(cross-entropy loss)
                        print("[loss:%5.2f][pp:%5.2f]" %
                              (total_loss, math.exp(total_loss)))
                        total_loss_list.append(total_loss)
                        freq = 0

            # Finishing iteration
            if phase == 'train':
                pd.DataFrame(total_loss_list).to_csv(
                    './rnn_based/save/{} epoch_loss.csv'.format(e),
                    index=False)
            if phase == 'valid':
                val_loss /= len(dataloader_dict['valid'])
                print(
                    "[Epoch:%d] val_loss:%5.3f | val_pp:%5.2fS | spend_time:%5.2fmin"
                    % (e, val_loss, math.exp(val_loss),
                       (time.time() - start_time_e) / 60))
                # Checkpoint on first epoch or on any validation improvement.
                if not best_val_loss or val_loss < best_val_loss:
                    print("[!] saving model...")
                    torch.save(seq2seq.state_dict(),
                               './rnn_based/save/seq2seq_{}.pt'.format(e))
                    best_val_loss = val_loss

        # LR decays per epoch; teacher forcing anneals geometrically.
        scheduler.step()
        teacher_forcing_ratio *= 0.9
    print('Done!')
Beispiel #9
0
def main():
    """Jointly train a fair deep-clustering model over the MNIST/USPS pair.

    Loads a shared pretrained encoder plus per-group (MNIST / USPS)
    encoder+DEC models, initialises the shared DFC's cluster centers from
    k-means output on disk, then optimises a weighted sum of clustering
    (KL), fairness (adversarial) and partition (MSE) losses for args.iters
    steps, reporting accuracy / NMI / balance every args.test_interval
    steps. Relies on a module-level ``args`` namespace (not a parameter).
    """
    set_seed(args.seed)
    torch.cuda.set_device(args.gpu)

    # Shared encoder plus one frozen reference encoder per group.
    encoder = Encoder().cuda()
    encoder_group_0 = Encoder().cuda()
    encoder_group_1 = Encoder().cuda()

    dfc = DFC(cluster_number=args.k, hidden_dimension=64).cuda()
    dfc_group_0 = DFC(cluster_number=args.k, hidden_dimension=64).cuda()
    dfc_group_1 = DFC(cluster_number=args.k, hidden_dimension=64).cuda()

    # Adversary that tries to predict group membership from cluster outputs.
    critic = AdversarialNetwork(in_feature=args.k,
                                hidden_size=32,
                                max_iter=args.iters,
                                lr_mult=args.adv_mult).cuda()

    # encoder pre-trained with self-reconstruction
    encoder.load_state_dict(torch.load("./save/encoder_pretrain.pth"))

    # encoder and clustering model trained by DEC
    encoder_group_0.load_state_dict(torch.load("./save/encoder_mnist.pth"))
    encoder_group_1.load_state_dict(torch.load("./save/encoder_usps.pth"))
    dfc_group_0.load_state_dict(torch.load("./save/dec_mnist.pth"))
    dfc_group_1.load_state_dict(torch.load("./save/dec_usps.pth"))

    # load clustering centroids given by k-means
    centers = np.loadtxt("./save/centers.txt")
    cluster_centers = torch.tensor(centers,
                                   dtype=torch.float,
                                   requires_grad=True).cuda()
    with torch.no_grad():
        print("loading clustering centers...")
        dfc.state_dict()['assignment.cluster_centers'].copy_(cluster_centers)

    optimizer = torch.optim.Adam(dfc.get_parameters() +
                                 encoder.get_parameters() +
                                 critic.get_parameters(),
                                 lr=args.lr,
                                 weight_decay=5e-4)
    criterion_c = nn.KLDivLoss(reduction="sum")
    criterion_p = nn.MSELoss(reduction="sum")
    C_LOSS = AverageMeter()
    F_LOSS = AverageMeter()
    P_LOSS = AverageMeter()

    # Reference models stay frozen throughout.
    encoder_group_0.eval(), encoder_group_1.eval()
    dfc_group_0.eval(), dfc_group_1.eval()

    data_loader = mnist_usps(args)
    len_image_0 = len(data_loader[0])
    len_image_1 = len(data_loader[1])

    for step in range(args.iters):
        encoder.train()
        dfc.train()
        # Restart each group's iterator whenever it has been exhausted.
        if step % len_image_0 == 0:
            iter_image_0 = iter(data_loader[0])
        if step % len_image_1 == 0:
            iter_image_1 = iter(data_loader[1])

        # NOTE: next(iter_image_0) is the idiomatic spelling of __next__().
        image_0, _ = iter_image_0.__next__()
        image_1, _ = iter_image_1.__next__()

        image_0, image_1 = image_0.cuda(), image_1.cuda()
        # One joint batch: group 0 occupies the first args.bs rows.
        image = torch.cat((image_0, image_1), dim=0)

        # Per-group soft assignments from the frozen reference models.
        predict_0, predict_1 = dfc_group_0(
            encoder_group_0(image_0)[0]), dfc_group_1(
                encoder_group_1(image_1)[0])

        z, _, _ = encoder(image)
        output = dfc(z)

        # Split the joint output back into the two groups.
        output_0, output_1 = output[0:args.bs, :], output[args.bs:args.bs *
                                                          2, :]
        target_0, target_1 = target_distribution(
            output_0).detach(), target_distribution(output_1).detach()

        clustering_loss = 0.5 * criterion_c(output_0.log(
        ), target_0) + 0.5 * criterion_c(output_1.log(), target_1)
        fair_loss = adv_loss(output, critic)
        partition_loss = 0.5 * criterion_p(aff(output_0), aff(predict_0).detach()) \
                         + 0.5 * criterion_p(aff(output_1), aff(predict_1).detach())
        total_loss = clustering_loss + args.coeff_fair * fair_loss + args.coeff_par * partition_loss

        # Manual inverse-decay LR schedule applied each step.
        optimizer = inv_lr_scheduler(optimizer, args.lr, step, args.iters)
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # NOTE(review): the raw loss tensors (not .item() floats) are passed
        # here — confirm AverageMeter detaches them, otherwise graphs are
        # kept alive.
        C_LOSS.update(clustering_loss)
        F_LOSS.update(fair_loss)
        P_LOSS.update(partition_loss)

        # Evaluate at step 0 and then every args.test_interval steps.
        if step % args.test_interval == args.test_interval - 1 or step == 0:
            predicted, labels = predict(data_loader, encoder, dfc)
            predicted, labels = predicted.cpu().numpy(), labels.numpy()
            _, accuracy = cluster_accuracy(predicted, labels, 10)
            nmi = normalized_mutual_info_score(labels,
                                               predicted,
                                               average_method="arithmetic")
            # 60000 — presumably the MNIST training-set size; verify.
            bal, en_0, en_1 = balance(predicted, 60000)

            print("Step:[{:03d}/{:03d}]  "
                  "Acc:{:2.3f};"
                  "NMI:{:1.3f};"
                  "Bal:{:1.3f};"
                  "En:{:1.3f}/{:1.3f};"
                  "C.loss:{C_Loss.avg:3.2f};"
                  "F.loss:{F_Loss.avg:3.2f};"
                  "P.loss:{P_Loss.avg:3.2f};".format(step + 1,
                                                     args.iters,
                                                     accuracy,
                                                     nmi,
                                                     bal,
                                                     en_0,
                                                     en_1,
                                                     C_Loss=C_LOSS,
                                                     F_Loss=F_LOSS,
                                                     P_Loss=P_LOSS))

    return
Beispiel #10
0
# Code to test the functionality of get_cluster_center()
if __name__ == "__main__":
    print('\n \nTesting loading centers...')

    # Minimal CLI mirroring the options the data/cluster helpers expect.
    cli = argparse.ArgumentParser()
    cli.add_argument("--gpu", type=int, default=0)
    cli.add_argument("--seed", type=int, default=2019)
    cli.add_argument("--centers", type=str, default="save/centers.txt")
    cli.add_argument("--log_dir", type=str, default="clusters_temp/")
    cli.add_argument("--encoder_bs", type=int, default=128)

    cli.add_argument("--bs", type=int, default=512)
    cli.add_argument("--cluster_n_init", type=int, default=20)
    cli.add_argument("--cluster_max_step", type=int, default=5000)
    cli.add_argument("--half_tensor", type=bool, default=False)
    cli.add_argument("--encoder_type", type=str, default='vae')
    args = cli.parse_args()

    dev = 'cpu'  # set device for this script

    # Part 1: read the centers back from the text file.
    clusters = get_cluster_centers(file_path=args.centers, device=dev)
    print("Loaded centers with shape", clusters.shape, "\n \n")

    # Part 2: regenerate centers from a pretrained encoder.
    print("Testing generating centers...")
    enc = Encoder().to(dev)  # load encoder class
    ckpt = 'save/encoder_pretrain.pth'  # path to pretrained encoder
    enc.load_state_dict(torch.load(ckpt, map_location=dev))
    loader = mnist_usps(args)[0]  # data iterator from the data loader
    clusters = get_cluster_centers(args=args, autoencoder=enc, device=dev,
                                   cluster_number=10, dataloader_list=[loader],
                                   save_name="test_cluster")
    print("Loaded centers with shape", clusters.shape)
Beispiel #11
0
class Tagger(nn.Module):
    """Sequence tagger: embedding + time signal -> encoder -> partial CRF.

    The CRF layer is constrained by begin/end/transition masks taken from
    the tag vocabulary, supporting partially-annotated training data.
    """

    def __init__(self,
                 text_vocab: Vocab,
                 tag_vocab: TagVocab,
                 embed_dim,
                 hidden_mode,
                 hidden_dim,
                 hidden_layers,
                 atten_heads=1,
                 num_blocks=1,
                 dropout=0.2):
        """Build embedding, positional signal, encoder and constrained CRF."""
        super(Tagger, self).__init__()

        self.text_vocab = text_vocab
        self.tag_vocab = tag_vocab

        self.embed = nn.Embedding(len(text_vocab),
                                  embed_dim,
                                  padding_idx=text_vocab.stoi['<pad>'])
        self.time_signal = TimeSignal(embed_dim)
        self.encoder = Encoder(embed_dim,
                               hidden_mode,
                               hidden_dim,
                               hidden_layers,
                               atten_heads=atten_heads,
                               num_blocks=num_blocks,
                               dropout=dropout)

        # CRF with begin/end/transition constraints from the tag vocabulary.
        self.crf = PartialCRF(hidden_dim, len(tag_vocab),
                              tag_vocab.begin_constraints,
                              tag_vocab.end_constraints,
                              tag_vocab.transition_constraints, dropout)

    def criterion(self, batch: data.Batch):
        """Return the CRF training loss for one batch."""
        _, text_lens = batch.text
        masks, tags = batch.tags
        feats = self.feature(batch)

        return self.crf.criterion(feats, masks, text_lens)

    def feature(self, batch: data.Batch):
        """Encode a batch into CRF emission features (encoder hidden states)."""
        text, text_lens = batch.text
        embed = self.embed(text) + self.time_signal(text)

        hidden = self.encoder(embed, text_lens)

        return hidden

    def predict(self, batch: data.Batch):
        """Decode the best tag sequence per sentence (switches to eval mode)."""
        self.eval()
        _, text_len = batch.text
        feats = self.feature(batch)

        return self.crf(feats, text_len)

    def print(self, batch: data.Batch):
        """Print each sentence as space-joined 'word#gold_tag' tokens."""
        text, text_len = batch.text
        gold_masks, gold_tags = batch.tags
        for i in range(len(text_len)):
            length = text_len[i]
            print(' '.join([
                self.text_vocab.itos[w] + '#' + t
                for w, t in zip(text[0:length, i].data.tolist(), gold_tags[i])
            ]))

    def sample(self, batch: data.Batch):
        """Decode a batch and print predictions in '{{type:span}}' markup."""
        self.eval()
        text, text_len = batch.text
        gold_masks, gold_tags = batch.tags
        pred_tags = self.predict(batch)

        results = []
        for i in range(len(pred_tags)):
            score, pred_tag = pred_tags[i]
            length = text_len[i]
            sen = [
                self.text_vocab.itos[w]
                for w in text[0:length, i].data.tolist()
            ]

            def tostr(words: List[str], tags: List[int]):
                # Render BIES-style tags: B_/E_/S_ typed spans become
                # '{{type:...}}' markup; *_O tags pass words through.
                tags = [self.tag_vocab.itos[tag_id] for tag_id in tags]
                for word, tag in zip(words, tags):
                    if tag == 'E_O' or tag == 'S_O':
                        yield word + ' '
                    elif tag == 'B_O':
                        yield word
                    elif tag.startswith('B_'):
                        yield '{{%s:%s' % (tag[2:], word)
                    elif tag.startswith('E_'):
                        yield word + '}} '
                    elif tag.startswith('S_'):
                        yield '{{%s:%s}} ' % (tag[2:], word)
                    else:
                        yield word

            pred_tag = ''.join(tostr(sen, pred_tag))
            #gold_tag = ''.join([tostr(w, id) for w, id in zip(sen, gold_tags[0:length, i].data)])

            # if pred_tag != gold_tag:
            # print('\ngold: %s\npred: %s\nscore: %f' % (gold_tag, pred_tag, score))
            print('\npred: %s\nscore: %f' % (pred_tag, score))

        # NOTE(review): results is never appended to — always returned empty.
        return results

    def evaluation_one(self, pred: List[int], gold: torch.Tensor):
        """Count (correct, true) for one sentence against a partial gold matrix.

        Fixed: this was decorated @staticmethod while still taking ``self``,
        so any call through an instance would have bound the first real
        argument to ``self``. Explicit calls of the form
        ``Tagger.evaluation_one(obj, pred, gold)`` keep working unchanged.
        """
        correct = 0
        true = 0

        for curr in range(0, len(pred)):
            tag_list = [
                i for i in range(gold.size(-1)) if gold[curr, i].data == 1
            ]
            # Only positions with exactly one permitted gold tag are scored.
            if len(tag_list) == 1:
                true += 1
                gold_tag = tag_list[0]
                if pred[curr] == gold_tag:
                    correct += 1

        return correct, true

    def evaluation(self, data_it):
        """Predict over a dataset iterator and report recall.

        NOTE(review): the per-sentence scoring calls are commented out below,
        so correct/true stay 0 and recall is always ~0 — confirm whether the
        evaluation_one calls should be re-enabled.
        """
        self.eval()
        correct, true = 0, 0
        for batch in data_it:
            _, text_len = batch.text
            golds = batch.tags
            preds = self.predict(batch)

            for i in range(len(text_len)):
                score, pred = preds[i]
                gold = golds[0:text_len[i], i]
                #c, t = self.evaluation_one(pred, gold)
                #correct += c
                #true += t

        recall = correct / float(true + 1e-5)
        return {'recall': recall}

    def coarse_params(self):
        """Yield all trainable parameters (embedding, time signal, encoder, CRF)."""
        yield from self.embed.parameters()
        yield from self.time_signal.parameters()
        yield from self.encoder.parameters()
        yield from self.crf.parameters()

    def fine_params(self):
        """Yield fine-tuning parameters (everything except the encoder)."""
        yield from self.embed.parameters()
        yield from self.time_signal.parameters()
        yield from self.crf.parameters()
Beispiel #12
0
def dfc_encoder(*args):
    """Construct a fresh Encoder; positional arguments are accepted but ignored."""
    del args  # kept only for call-site compatibility
    return Encoder()
Beispiel #13
0
class App:
    """Three-column Tk ground-station GUI wired to an optional serial port.

    Columns hold loopback/cam/TCP panels, data/TLE/PAS controls, and an
    incoming-packet view. Telecommands are AX.25-style encoded via
    ``self.encoder`` and gated by RTS on the serial port when one is open.
    """

    def __init__(self, master):
        """Assemble the GUI and try to open the serial port from sys.argv[1]."""
        self.userCallsign = ''
        self.moduleCallsign = ''
        self.encoder = Encoder()

        firstCol = Frame(master)
        secCol = Frame(master)
        thdCol = Frame(master)

        # receive flag to avoid loopback
        self.receiveFlag = True

        self.lpbckpnl = LoopBackPanel(self, firstCol)
        self.bustMode = USBCam(self, firstCol)
        # NOTE(review): this rebinding discards the USBCam reference created
        # just above — confirm whether TCPConnection was meant to live in its
        # own attribute.
        self.bustMode = TCPConnection(firstCol)

        self.datCon = DataControl(self, secCol)
        self.tle = TLE(self, secCol)
        self.pas = PAS(self, secCol)
        self.incPack = IncomingPackets(thdCol)

        self.applySerial = False
        self.serial = ''  # blank object not null

        try:
            self.serial = serial.Serial(sys.argv[1])
            self.applySerial = True
        except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt alive
            print('No serial connected')

        firstCol.grid(row=0,
                column=0,
                sticky='nswe',
                padx=20,
                pady=20)

        secCol.grid(row=0,
                column=1,
                sticky='nswe',
                padx=10,
                pady=20)

        thdCol.grid(row=0,
                column=2,
                sticky='nswe',
                padx=10,
                pady=20)

        self.cparse = Telecommand(self, self.userCallsign)

        if self.applySerial:
            self.serial.setRTS(False)

    def recv_data(self):
        """Blocking receive loop; parse each packet until the link closes."""
        while True:
            receive = rx.receive()
            if receive.closed:
                # NOTE(review): print_lock is not defined in this file —
                # presumably a module-level lock; verify it is held here.
                print_lock.release()
                break

            self.cparse.parse(receive.callsign,
                    receive.message)

    def setCallsign(self, userCallsign, moduleCallsign):
        """Store the operator and module callsigns used by telecommands."""
        self.userCallsign = userCallsign
        self.moduleCallsign = moduleCallsign

    def telecommand(self, command):
        """Encode and send a command, raising RTS around the transmission."""
        if self.applySerial:
            self.serial.setRTS(True)

        time.sleep(0.5)
        if self.userCallsign != '' and self.moduleCallsign != '':
            self.encoder.encode(self.userCallsign,
                    self.moduleCallsign,
                    command)
        else:
            # Callsigns not configured yet: warn audibly instead of sending.
            playsound('warning.wav')

        if self.applySerial:
            self.serial.setRTS(False)

    def telecommand_with_message(self, command, message):
        """Like telecommand(), but attaches a payload message."""
        if self.applySerial:
            self.serial.setRTS(True)

        time.sleep(0.5)

        if self.userCallsign != '' and self.moduleCallsign != '':
            self.encoder.encode_with_message(self.userCallsign,
                    self.moduleCallsign,
                    command,
                    message)
        else:
            playsound('warning.wav')

        if self.applySerial:
            self.serial.setRTS(False)

    def getReceiveFlag(self):
        """Return the loopback-avoidance receive flag."""
        return self.receiveFlag