def _run_epoch(epoch, mode):
    # Dropout/BatchNorm must only be in training mode when mode == "train".
    model.train(mode == "train")
    if mode == "train":
        description = 'Train'
        dataset = trainData
        shuffle = True
    else:
        description = 'Valid'
        dataset = validData
        shuffle = False
    dataloader = DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            collate_fn=dataset.collate_fn,
                            num_workers=8)

    trange = tqdm(enumerate(dataloader),
                  total=len(dataloader),
                  desc=description)
    loss = 0
    f1_score = F1()

    # Each batch comes from AbstractDataset.collate_fn:
    # x = torch.LongTensor(batch_abstract), y = torch.FloatTensor(batch_label)
    for i, (x, y, sent_len) in trange:
        # Skip incomplete final batches so every step sees a fixed batch size.
        if x.size(0) != batch_size:
            continue
        o_labels, batch_loss = _run_iter(x, y)
        if mode == "train":
            opt.zero_grad()
            batch_loss.backward()
            opt.step()

        loss += batch_loss.item()
        f1_score.update(o_labels.cpu(), y)

        trange.set_postfix(loss=loss / (i + 1), f1=f1_score.print_score())

    if mode == "train":
        history['train'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
        writer.add_scalar('Loss/train', loss / len(trange), epoch)
        writer.add_scalar('F1_score/train', f1_score.get_score(), epoch)
    else:
        history['valid'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
        writer.add_scalar('Loss/valid', loss / len(trange), epoch)
        writer.add_scalar('F1_score/valid', f1_score.get_score(), epoch)
    trange.close()
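
# The loops above call _run_iter without defining it. A minimal sketch of what
# it is assumed to do: move a batch to the device, run the forward pass, and
# return (outputs, loss). The names device and criterion, and the loss choice,
# are assumptions, not taken from the original source:
def _run_iter(x, y):
    x, y = x.to(device), y.to(device)
    o_labels = model(x)                  # forward pass
    batch_loss = criterion(o_labels, y)  # e.g. torch.nn.BCEWithLogitsLoss()
    return o_labels, batch_loss
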
Example #2
    def run_epoch(self, epoch, training):
        self.model.train(training)

        if training:
            description = 'Train'
            dataset = self.trainData
            shuffle = True
        else:
            description = 'Valid'
            dataset = self.validData
            shuffle = False
        dataloader = DataLoader(dataset=dataset,
                                batch_size=self.batch_size,
                                shuffle=shuffle,
                                collate_fn=dataset.collate_fn,
                                num_workers=4)

        trange = tqdm(enumerate(dataloader),
                      total=len(dataloader),
                      desc=description)

        loss = 0
        acc = accuracy()

        for i, (x, _, y) in trange:
            o_labels, batch_loss = self.run_iter(x, y)
            if training:
                self.opt.zero_grad()
                batch_loss.backward()
                self.opt.step()

            loss += batch_loss.item()
            acc.update(o_labels.cpu(), y)

            trange.set_postfix(loss=loss / (i + 1), acc=acc.print_score())
        if training:
            self.history['train'].append({
                'acc': acc.get_score(),
                'loss': loss / len(trange)
            })
            self.save_hist()
        else:
            self.history['valid'].append({
                'acc': acc.get_score(),
                'loss': loss / len(trange)
            })
            self.save_hist()
            if acc.get_score() > self.best_val:
                self.best_val = acc.get_score()
                self.save_best(epoch)
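
# The accuracy() helper used above is never defined in these snippets. A
# minimal sketch matching the update / get_score / print_score interface the
# loop relies on; the argmax-based internals are an assumption:
import torch

class accuracy:
    def __init__(self):
        self.correct, self.total = 0, 0

    def update(self, predicts, groundTruth):
        # predicts: (batch, n_classes) scores; groundTruth: (batch,) labels
        predicted = torch.argmax(predicts, dim=1).cpu()
        self.correct += (predicted == groundTruth.cpu()).sum().item()
        self.total += groundTruth.size(0)

    def get_score(self):
        return self.correct / max(self.total, 1)

    def print_score(self):
        return f'{self.get_score():.5f}'
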
def _run_epoch(epoch, training, fine_tune):
    model.train(training)
    if training:
        description = 'Train'
        dataset = trainData
        shuffle = True
    else:
        description = 'Valid'
        dataset = validData
        shuffle = False

    if fine_tune:
        model.unfreeze()
    else:
        model.freeze()
    dataloader = DataLoader(dataset=dataset,
                            batch_size=12,
                            shuffle=shuffle,
                            collate_fn=dataset.collate_fn,
                            num_workers=4)

    trange = tqdm(enumerate(dataloader),
                  total=len(dataloader),
                  desc=description)
    loss = 0
    f1_score = F1()

    for i, (x, y) in trange:
        batch_loss, o_labels = _run_iter(x, y)
        if training:
            opt.zero_grad()
            batch_loss.backward()
            opt.step()

        loss += batch_loss.item()
        f1_score.update(o_labels.cpu(), y)

        trange.set_postfix(loss=loss / (i + 1), f1=f1_score.print_score())

    if training:
        history['train'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
    else:
        history['valid'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
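
# model.freeze() / model.unfreeze() above imply a model whose pretrained
# backbone can be toggled for fine-tuning. A plausible sketch; only the two
# method names come from the snippet, everything else is an assumption:
import torch

class FineTuneModel(torch.nn.Module):
    def __init__(self, encoder, classifier):
        super().__init__()
        self.encoder = encoder        # e.g. a pretrained transformer
        self.classifier = classifier  # task head, always trainable

    def forward(self, x):
        return self.classifier(self.encoder(x))

    def freeze(self):
        for param in self.encoder.parameters():
            param.requires_grad = False

    def unfreeze(self):
        for param in self.encoder.parameters():
            param.requires_grad = True
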
def _run_epoch(epoch, mode):
    model.train(mode == "train")
    if mode == "train":
        description = 'Train'
        dataset = trainData
        shuffle = True
    else:
        description = 'Valid'
        dataset = validData
        shuffle = False
    dataloader = DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            collate_fn=dataset.collate_fn,
                            num_workers=8)

    trange = tqdm(enumerate(dataloader), total=len(dataloader), desc=description)
    loss = 0
    f1_score = F1()
    for i, (x, y, sent_len) in trange:
        o_labels, batch_loss = _run_iter(x, y)
        if mode == "train":
            opt.zero_grad()
            batch_loss.backward()
            opt.step()

        loss += batch_loss.item()
        f1_score.update(o_labels.cpu(), y)

        trange.set_postfix(
            loss=loss / (i + 1), f1=f1_score.print_score())
    if mode == "train":
        history['train'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
        writer.add_scalar('Loss/train', loss / len(trange), epoch)
        writer.add_scalar('F1_score/train', f1_score.get_score(), epoch)
    else:
        history['valid'].append({
            'f1': f1_score.get_score(),
            'loss': loss / len(trange)
        })
        writer.add_scalar('Loss/valid', loss / len(trange), epoch)
        writer.add_scalar('F1_score/valid', f1_score.get_score(), epoch)
    trange.close()
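
# F1() is another undefined helper. A minimal sketch for multi-label outputs
# with the same update / get_score / print_score interface; the 0.5 threshold
# and micro-averaging are assumptions:
class F1:
    def __init__(self, threshold=0.5):
        self.threshold = threshold
        self.tp = 0          # true positives
        self.pred_pos = 0    # predicted positives
        self.true_pos = 0    # actual positives

    def update(self, predicts, groundTruth):
        # Assumes predicts are per-label probabilities in [0, 1].
        predicts = (predicts > self.threshold).float().cpu()
        groundTruth = groundTruth.float().cpu()
        self.tp += (predicts * groundTruth).sum().item()
        self.pred_pos += predicts.sum().item()
        self.true_pos += groundTruth.sum().item()

    def get_score(self):
        precision = self.tp / max(self.pred_pos, 1e-8)
        recall = self.tp / max(self.true_pos, 1e-8)
        return 2 * precision * recall / max(precision + recall, 1e-8)

    def print_score(self):
        return f'{self.get_score():.5f}'
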
Example #5
    def run_epoch(self, epoch, training):
        self.classficationA.train(training)
        self.classficationB.train(training)

        if training:
            description = 'Train'
            dataset = self.trainData
            shuffle = True
        else:
            description = 'Valid'
            dataset = self.validData
            shuffle = False
        dataloader = DataLoader(dataset=dataset,
                                batch_size=256,
                                shuffle=shuffle,
                                collate_fn=dataset.collate_fn,
                                num_workers=4)

        trange = tqdm(enumerate(dataloader),
                      total=len(dataloader),
                      desc=description)

        mse_loss = 0
        lossA = 0
        lossB = 0
        accA = accuracy()
        accB = accuracy()

        for i, (ft, missing_ft, labels) in trange:
            ft = ft.to(self.device)
            missing_ft = missing_ft.to(self.device)
            all_ft = torch.cat([ft, missing_ft], dim=1)
            labels = labels.to(self.device)

            # ------------------
            #  Train ClassifierA
            # ------------------
            # ClassifierA sees only the observed features, ClassifierB sees
            # all of them; the MSE terms pull A's hidden activations toward
            # B's as a hint-style distillation signal.

            missing_out, missing_hidden_out = self.classficationA(ft)
            all_out, all_hidden_out = self.classficationB(all_ft)
            batch_loss = self.criterion(missing_out, labels)
            batch_mse_loss = 0
            for missing_hidden, all_hidden in zip(missing_hidden_out,
                                                  all_hidden_out):
                batch_mse_loss += self.mse_loss(missing_hidden, all_hidden)
            mse_loss += batch_mse_loss.item()

            if training:
                self.opt_C_A.zero_grad()
                (batch_mse_loss + batch_loss).backward()
                self.opt_C_A.step()
            lossA += batch_loss.item()
            accA.update(missing_out, labels)

            # ------------------
            #  Train ClassifierB
            # ------------------

            all_out, _ = self.classficationB(all_ft)
            batch_loss = self.criterion(all_out, labels)
            if training:
                self.opt_C_B.zero_grad()
                batch_loss.backward()
                self.opt_C_B.step()
            lossB += batch_loss.item()
            accB.update(all_out, labels)

            trange.set_postfix(accA=accA.print_score(),
                               accB=accB.print_score(),
                               lossA=lossA / (i + 1),
                               lossB=lossB / (i + 1),
                               mseLoss=mse_loss / (i + 1))
        if training:
            self.history['train'].append({
                'accA': accA.get_score(),
                'accB': accB.get_score(),
                'lossA': lossA / len(trange),
                'lossB': lossB / len(trange),
                'mseLoss': mse_loss / len(trange)
            })
            self.save_hist()

        else:
            self.history['valid'].append({
                'accA': accA.get_score(),
                'accB': accB.get_score(),
                'lossA': lossA / len(trange),
                'lossB': lossB / len(trange),
                'mseLoss': mse_loss / len(trange)
            })
            self.save_hist()
            if self.best_val < accA.get_score():
                self.best_val = accA.get_score()
                self.save_best(epoch)
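
# A sketch of the trainer state the two-classifier loop above assumes; the
# attribute names mirror the loop, but Adam, the learning rate, and the loss
# choices are assumptions:
import torch

def build_two_classifier_state(trainer, netA, netB, device, lr=1e-3):
    trainer.device = device
    trainer.classficationA = netA.to(device)  # sees observed features only
    trainer.classficationB = netB.to(device)  # sees observed + missing features
    trainer.opt_C_A = torch.optim.Adam(netA.parameters(), lr=lr)
    trainer.opt_C_B = torch.optim.Adam(netB.parameters(), lr=lr)
    trainer.criterion = torch.nn.CrossEntropyLoss()
    trainer.mse_loss = torch.nn.MSELoss()
    trainer.best_val = 0
    trainer.history = {'train': [], 'valid': []}
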
    def run_epoch(self, epoch, training):
        self.fadding_model.train(training)
        self.fixed_model.train(False)

        if training:
            description = 'Train'
            dataset = self.trainData
            shuffle = True
        else:
            description = 'Valid'
            dataset = self.validData
            shuffle = False
        dataloader = DataLoader(dataset=dataset,
                                batch_size=self.batch_size,
                                shuffle=shuffle,
                                collate_fn=dataset.collate_fn,
                                num_workers=4)

        trange = tqdm(enumerate(dataloader),
                      total=len(dataloader),
                      desc=description)

        loss = 0
        acc_fadding = accuracy()
        acc_fixed = accuracy()

        for i, (ft, missing_ft, labels) in trange:
            ft = ft.to(self.device)
            missing_ft = missing_ft.to(self.device)
            labels = labels.to(self.device)
            # Fade the missing features toward zero as training progresses:
            # scale = 0.9 ** sqrt(epoch * 100).
            missing_fadding_ft = missing_ft * (0.9**((epoch * 100)**(1 / 2)))
            missing_0_ft = missing_ft * 0

            fadding_ft = torch.cat([missing_fadding_ft, ft], dim=1)
            zero_ft = torch.cat([missing_0_ft, ft], dim=1)
            raw_ft = torch.cat([missing_ft, ft], dim=1)

            fadding_out, fadding_hiddens = self.fadding_model(fadding_ft)
            zero_out, _ = self.fadding_model(zero_ft)
            raw_out, raw_hiddens = self.fixed_model(raw_ft)

            batch_loss = 0
            for raw_hidden, fadding_hidden in zip(raw_hiddens,
                                                  fadding_hiddens):
                batch_loss += self.criteria(raw_hidden, fadding_hidden)

            batch_loss += self.criteria(raw_out, fadding_out)

            if training:
                self.opt.zero_grad()
                batch_loss.backward()
                self.opt.step()

            loss += batch_loss.item()
            acc_fadding.update(fadding_out, labels)
            acc_fixed.update(zero_out, labels)

            trange.set_postfix(loss=loss / (i + 1),
                               acc_fadding=acc_fadding.print_score(),
                               acc_fixed=acc_fixed.print_score())

        # self.scheduler.step()

        if training:
            self.history['train'].append({
                'acc-fadding': acc_fadding.get_score(),
                'acc_fixed': acc_fixed.get_score(),
                'loss': loss / len(trange)
            })
            self.save_hist()
        else:
            self.history['valid'].append({
                'acc-fadding': acc_fadding.get_score(),
                'acc_fixed': acc_fixed.get_score(),
                'loss': loss / len(trange)
            })
            self.save_hist()
            if acc_fixed.get_score() > self.best_val:
                self.best_val = acc_fixed.get_score()
                self.save_best(epoch)
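
# How quickly does the 0.9 ** sqrt(epoch * 100) fading coefficient decay? A
# quick check of the formula used above:
for epoch in [0, 1, 4, 9, 25]:
    scale = 0.9 ** ((epoch * 100) ** 0.5)
    print(f'epoch {epoch:2d}: scale = {scale:.4f}')
# epoch  0: scale = 1.0000
# epoch  1: scale = 0.3487
# epoch  4: scale = 0.1216
# epoch  9: scale = 0.0424
# epoch 25: scale = 0.0052
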
Example #7
# trange here is assumed to be tqdm's trange(num_iterations) progress iterator.
all_delta_Ei = []
total_E = []  # energy of the configuration after each annealing iteration
for i in trange:
    qubits_mat, beta, E_off, flip_count, delta_Ei_list = TSPsolver(city_size,
                                                                    qubits_mat,
                                                                    distance_mat,
                                                                    beta,
                                                                    beta_increment, 
                                                                    E_off, 
                                                                    flip_count,
                                                                    E_off_increment)
    all_delta_Ei.append(delta_Ei_list)
    a_term, b_term = calculate_E(city_size, qubits_mat, distance_mat, A, B)

    trange.set_postfix(Energy=a_term + b_term,
                       one_bits=np.sum(qubits_mat),
                       beta=beta,
                       E_off=E_off,
                       Aterm=a_term,
                       Bterm=b_term,
                       flip=flip_count)

    total_E.append(a_term + b_term)
#%%
# make plot
# find city order
start_point, end_point = [], []
for i in range(city_size - 1):
    first = np.where(qubits_mat[:, i] == 1)[0]
    second = np.where(qubits_mat[:, i + 1] == 1)[0]
    # Pair every city active at tour position i with every city active at i+1.
    first, second = np.repeat(first, len(second)), np.tile(second, len(first))
    start_point.extend(first)
    end_point.extend(second)
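
# The "make plot" comment above is never followed by plotting code. A sketch
# under the assumption that a city_coords array of shape (city_size, 2) is
# available elsewhere (city_coords is hypothetical; start_point and end_point
# come from the loop above):
import matplotlib.pyplot as plt

plt.scatter(city_coords[:, 0], city_coords[:, 1])
for s, e in zip(start_point, end_point):
    plt.plot([city_coords[s, 0], city_coords[e, 0]],
             [city_coords[s, 1], city_coords[e, 1]], 'b-')
plt.title('TSP tour recovered from qubits_mat')
plt.show()
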
    def run_epoch(self, epoch, training):
        self.generator.train(training)
        self.discriminator.train(training)
        self.classfication.train(training)

        if training:
            description = 'Train'
            dataset = self.trainData
            shuffle = True
        else:
            description = 'Valid'
            dataset = self.validData
            shuffle = False
        dataloader = DataLoader(dataset=dataset,
                                batch_size=256,
                                shuffle=shuffle,
                                collate_fn=dataset.collate_fn,
                                num_workers=4)

        trange = tqdm(enumerate(dataloader),
                      total=len(dataloader),
                      desc=description)

        g_loss = 0
        d_loss = 0
        loss = 0
        acc = accuracy()

        for i, (ft, missing_ft, labels) in trange:
            ft = ft.to(self.device)
            missing_ft = missing_ft.to(self.device)
            labels = labels.to(self.device)
            batch_size = ft.shape[0]
            # Adversarial targets: 1 for real samples, 0 for generated ones.
            true = torch.full((batch_size, 1), 1.0,
                              device=self.device)  # (batch, 1)
            fake = torch.full((batch_size, 1), 0.0,
                              device=self.device)  # (batch, 1)

            # -----------------
            #  Train Generator
            # -----------------

            gen_missing = self.generator(ft.detach())
            validity = self.discriminator(gen_missing)
            batch_g_loss = self.adversarial_loss(validity, true)

            if training:
                self.opt_G.zero_grad()
                batch_g_loss.backward()
                self.opt_G.step()
            g_loss += batch_g_loss.item()

            # ---------------------
            #  Train Discriminator
            # ---------------------
            real_pred = self.discriminator(missing_ft)
            d_real_loss = self.adversarial_loss(real_pred, true)

            fake_missing = self.generator(ft.detach())
            # Detach so the discriminator update does not backpropagate
            # through the generator.
            fake_pred = self.discriminator(fake_missing.detach())
            d_fake_loss = self.adversarial_loss(fake_pred, fake)
            batch_d_loss = (d_real_loss + d_fake_loss) / 2

            if training:
                self.opt_D.zero_grad()
                batch_d_loss.backward()
                self.opt_D.step()
            d_loss += batch_d_loss.item()

            # ------------------
            #  Train Classifier
            # ------------------

            gen_missing = self.generator(ft.detach())
            all_features = torch.cat((ft, gen_missing), dim=1)
            o_labels = self.classfication(all_features)
            batch_loss = self.criterion(o_labels, labels)
            if training:
                self.opt_C.zero_grad()
                batch_loss.backward()
                self.opt_C.step()
            loss += batch_loss.item()

            acc.update(o_labels, labels)

            trange.set_postfix(acc=acc.print_score(),
                               g_loss=g_loss / (i + 1),
                               d_loss=d_loss / (i + 1),
                               loss=loss / (i + 1))

        if training:
            self.history['train'].append({
                'acc': acc.get_score(),
                'g_loss': g_loss / len(trange),
                'd_loss': d_loss / len(trange),
                'loss': loss / len(trange)
            })
            self.save_hist()

        else:
            self.history['valid'].append({
                'acc': acc.get_score(),
                'g_loss': g_loss / len(trange),
                'd_loss': d_loss / len(trange),
                'loss': loss / len(trange)
            })
            self.save_hist()
            if self.best_val < acc.get_score():
                self.best_val = acc.get_score()
                self.save_best(epoch)
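
# A sketch of the constructor state the GAN loop above depends on; attribute
# names mirror the loop, while Adam, the learning rate, and the loss choices
# follow common GAN practice and are assumptions:
import torch

def build_gan_state(trainer, generator, discriminator, classifier, device,
                    lr=2e-4):
    trainer.device = device
    trainer.generator = generator.to(device)      # imputes missing features
    trainer.discriminator = discriminator.to(device)
    trainer.classfication = classifier.to(device)
    trainer.opt_G = torch.optim.Adam(generator.parameters(), lr=lr)
    trainer.opt_D = torch.optim.Adam(discriminator.parameters(), lr=lr)
    trainer.opt_C = torch.optim.Adam(classifier.parameters(), lr=lr)
    trainer.adversarial_loss = torch.nn.BCELoss()  # discriminator emits probabilities
    trainer.criterion = torch.nn.CrossEntropyLoss()
    trainer.best_val = 0
    trainer.history = {'train': [], 'valid': []}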