Example 1
    def show_local_music_page_data(self):
        self.label_2.setText("%d首音乐" % len(self.curWholeMusicList.musics))  # "%d songs"
        self.tb_local_music.clearContents()
        self.tb_local_music.setRowCount(len(self.curMusicList.musics))
        self.set_tb_local_music_layout()
        # Columns: 0 index, 1 title, 2 artist, 3 album, 4 duration, 5 size
        for i in range(len(self.curMusicList.musics)):
            music = self.curMusicList.get(i)
            item = QTableWidgetItem()
            item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
            item.setText(str(i + 1) + " ")
            self.tb_local_music.setItem(i, 0, item)
            item = QTableWidgetItem(music.title)
            item.setToolTip(str(music.title))
            self.tb_local_music.setItem(i, 1, item)
            item = QTableWidgetItem(music.artist)
            item.setToolTip(str(music.artist))
            self.tb_local_music.setItem(i, 2, item)
            item = QTableWidgetItem(music.album)
            item.setToolTip(str(music.album))
            self.tb_local_music.setItem(i, 3, item)
            item = QTableWidgetItem(util.format_time(music.duration))
            item.setToolTip(util.format_time(music.duration))
            self.tb_local_music.setItem(i, 4, item)
            item = QTableWidgetItem(str(music.size))
            item.setToolTip(str(music.size))
            self.tb_local_music.setItem(i, 5, item)
        self.setIconItem()
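
These table-filling snippets assume a small data model along the following lines. This is a hypothetical sketch inferred from the attribute accesses above (title, artist, album, duration, size, plus a list object exposing musics and get()); the project's real classes are not shown in the source.

class Music:
    def __init__(self, title, artist, album, duration, size):
        # duration is assumed to be in seconds, matching the util.format_time calls
        self.title, self.artist, self.album = title, artist, album
        self.duration, self.size = duration, size

class MusicList:
    def __init__(self, musics):
        self.musics = musics

    def get(self, i):
        return self.musics[i]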
Example 2
    def show_musics_data(self):
        self.music_list_name.setText(self.curMusicList.name)
        self.music_list_date.setText("%s创建" % self.curMusicList.created)  # "created %s"
        self.lb_music_count.setText(
            "<p>歌曲数</p><p style='text-align: right'>%d</p>" %  # "song count"
            len(self.curMusicList.musics))
        self.lb_played_count.setText(
            "<p>播放数</p><p style='text-align: right'>%d</p>" %  # "play count"
            self.curMusicList.play_count)

        self.musics.clearContents()
        self.musics.setRowCount(len(self.curMusicList.musics))
        music_list = self.curMusicList
        for i in range(len(music_list.musics)):
            music = music_list.get(i)
            item = QTableWidgetItem()
            item.setData(Qt.UserRole, music)
            item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
            item.setText(str(i + 1) + " ")
            self.musics.setItem(i, 0, item)
            item = QTableWidgetItem(str(music.title))
            item.setToolTip(music.title)
            self.musics.setItem(i, 1, item)
            item = QTableWidgetItem(music.artist)
            item.setToolTip(music.artist)
            self.musics.setItem(i, 2, item)
            item = QTableWidgetItem(music.album)
            item.setToolTip(music.album)
            self.musics.setItem(i, 3, item)
            item = QTableWidgetItem(util.format_time(music.duration))
            item.setToolTip(util.format_time(music.duration))
            self.musics.setItem(i, 4, item)
Example 3
    def show_musics_data(self):

        self.musics_table.clearContents()
        self.musics_table.setRowCount(len(self.cur_music_list.musics))
        music_list = self.cur_music_list
        for i in range(len(music_list.musics)):
            music = music_list.get(i)
            item = QTableWidgetItem()
            item.setData(Qt.UserRole, music)
            item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
            item.setText(str(i + 1) + " ")
            self.musics_table.setItem(i, 0, item)
            item = QTableWidgetItem(str(music.title))
            item.setToolTip(music.title)
            self.musics_table.setItem(i, 1, item)
            item = QTableWidgetItem(music.artist)
            item.setToolTip(music.artist)
            self.musics_table.setItem(i, 2, item)
            item = QTableWidgetItem(music.album)
            item.setToolTip(music.album)
            self.musics_table.setItem(i, 3, item)
            item = QTableWidgetItem(util.format_time(music.duration))
            item.setToolTip(util.format_time(music.duration))
            self.musics_table.setItem(i, 4, item)
        # If the currently playing music belongs to this playlist, give it the speaker icon
        self.set_icon_item()
Example 4
    def onPositionChanged(self, position: int):
        duration = self.player.getDuration()
        if duration == 0:
            return
        # 进度条
        pos = int(float(position) / duration * 100)
        if not self.slider_progress.isSliderDown():
            self.slider_progress.setValue(pos)

        # duration label
        format_duration = util.format_time(int(duration / 1000))
        self.label_duration.setText(format_duration)

        # position label (renamed so the formatted string does not shadow the int parameter)
        format_position = util.format_time(int(position / 1000))
        self.label_pos.setText(format_position)
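
As a quick worked check of the math above: at position 93500 ms in a 187000 ms track, the slider lands at int(93500 / 187000 * 100) = 50, and, assuming util.format_time renders minutes and seconds, the labels read roughly "01:33" and "03:07".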
Example 5
    def show_data(self, play_list):
        self.setGeometry(self.parent().width() - 580, 150, 580,
                         self.parent().height() - 150 - 48)
        self.tableWidget.clearContents()
        self.tableWidget.setRowCount(play_list.size())
        self.label.setText("共%d首" % play_list.size())  # "%d songs in total"
        icon = QIcon("./resource/image/链接.png")

        for i in range(play_list.size()):
            # A plain QLabel with a link pixmap acts as the per-row "link" button;
            # a local variable suffices since nothing outside the loop uses it
            btn_link = QLabel(self.tableWidget)
            btn_link.setStyleSheet("background-color:rgba(0,0,0,0)")
            btn_link.setPixmap(QPixmap("./resource/image/链接.png"))
            btn_link.setAlignment(Qt.AlignCenter)
            btn_link.setCursor(Qt.PointingHandCursor)
            # btn_link.installEventFilter(self)

            # icon_item = QTableWidgetItem()
            # icon_item.setIcon(icon)
            music = play_list.get(i)
            self.tableWidget.setItem(i, 0, QTableWidgetItem("\t"))
            self.tableWidget.setItem(i, 1, QTableWidgetItem(music.title))
            self.tableWidget.setItem(i, 2, QTableWidgetItem(music.artist))
            # self.tableWidget.setItem(i, 3, icon_item)
            self.tableWidget.setCellWidget(i, 3, btn_link)

            self.tableWidget.setItem(
                i, 4, QTableWidgetItem(util.format_time(music.duration)))

        # Show the speaker icon on the currently playing music
        icon_label = QLabel()
        icon_label.setPixmap(QPixmap("./resource/image/musics_play.png"))
        icon_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
        icon_label.setCursor(Qt.PointingHandCursor)
        self.tableWidget.setCellWidget(play_list.get_current_index(), 0,
                                       icon_label)
# Assumed imports for the two standalone training snippets below; DEVICE,
# format_time and evaluate_PerEmotion are defined elsewhere in the project.
import time

import torch
from torch import nn
from torch.optim import AdamW  # transformers' AdamW accepts the same keywords


def train_model(model,
                name,
                train_dataloader,
                validation_dataloader,
                filepath='',
                lr=2e-5,
                EPOCHS=7,
                BATCH_SIZE=1,
                weight_decay=0.9,
                eps=1e-7):
    training_stats = []
    torch.cuda.empty_cache()
    model = model.to(DEVICE)
    optimizer = AdamW(model.parameters(),
                      lr=lr,
                      weight_decay=weight_decay,
                      eps=eps)

    loss_func = torch.nn.NLLLoss()
    training_loss_history = []
    val_loss_history = []
    for epoch_num in range(EPOCHS):
        t0 = time.time()
        model.train()
        total_train_loss = 0
        for step_num, batch_data in enumerate(train_dataloader):
            input_ids, attention_masks, anger, fear, joy, sadness, vec, intensity = tuple(
                t for t in batch_data)

            input_ids = input_ids.to(DEVICE)
            attention_masks = attention_masks.to(DEVICE)
            anger = anger.to(DEVICE)
            fear = fear.to(DEVICE)
            joy = joy.to(DEVICE)
            sadness = sadness.to(DEVICE)
            intensity = intensity.to(DEVICE)
            vec = vec.to(DEVICE)

            model.zero_grad()

            probas = model(input_ids, attention_masks)
            loss = loss_func(probas, intensity)

            total_train_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            # scheduler.step()
            print('Epoch: ', epoch_num + 1)
            # len(train_dataloader) already counts batches, so the progress
            # denominator needs no division by BATCH_SIZE
            print("\r" + "{0}/{1} loss: {2} ".format(
                step_num, len(train_dataloader),
                total_train_loss / (step_num + 1)))
        avg_train_loss = total_train_loss / len(train_dataloader)

        dropout = model.dropout.p
        parameter_tuned = (f'_lr_{lr}_dropout_{dropout}'
                           f'_weight_decay_{weight_decay}_eps_{eps}')
        model_save_name = f'{filepath}{name}_epoch_{epoch_num}{parameter_tuned}.pt'
        torch.save(model.state_dict(), model_save_name)
        training_time = format_time(time.time() - t0)
        model.eval()

        total_pearson = 0
        total_pearson_some = 0
        total_kappa = 0
        total_kappa_some = 0

        total_eval_loss = 0

        for batch_data in validation_dataloader:
            input_ids, attention_masks, anger, fear, joy, sadness, vec, intensity = tuple(
                t for t in batch_data)

            input_ids = input_ids.to(DEVICE)
            attention_masks = attention_masks.to(DEVICE)
            anger = anger.to(DEVICE)
            fear = fear.to(DEVICE)
            joy = joy.to(DEVICE)
            sadness = sadness.to(DEVICE)
            intensity = intensity.to(DEVICE)
            vec = vec.to(DEVICE)

            with torch.no_grad():
                probas = model(input_ids, attention_masks)
                output = torch.max(probas, 1)[1]

                loss = loss_func(probas, intensity)

                # Accumulate the validation loss.
                total_eval_loss += loss.item()

                output = output.detach().cpu()
                intensity = intensity.to('cpu')

                # Compute Pearson correlation and kappa for this batch and
                # accumulate them over all batches.
                pear, pear_some, kappa, kappa_some = evaluate_PerEmotion(
                    intensity, output)

                total_pearson += pear
                total_pearson_some += pear_some
                total_kappa += kappa
                total_kappa_some += kappa_some

        # Average the correlation metrics over the whole validation run.
        avg_pearson = total_pearson / len(validation_dataloader)
        avg_some_pearson = total_pearson_some / len(validation_dataloader)
        avg_kappa = total_kappa / len(validation_dataloader)
        avg_some_kappa = total_kappa_some / len(validation_dataloader)

        # Calculate the average loss over all of the batches.
        avg_val_loss = total_eval_loss / len(validation_dataloader)

        val_time = format_time(time.time() - t0)

        training_loss_history.append(avg_train_loss)
        val_loss_history.append(avg_val_loss)

        # Record all statistics from this epoch.
        training_stats.append({
            'epoch': epoch_num + 1,
            'Training Loss': avg_train_loss,
            'Valid. Loss': avg_val_loss,
            'Pearson': avg_pearson,
            'Pearson Some': avg_some_pearson,
            'Kappa': avg_kappa,
            'Kappa Some': avg_some_kappa,
            'Learning Rate': lr,
            'Weight Decay': weight_decay,
            'Dropout': dropout,
            'Epsilon': eps,
            'Training Time': training_time,
            'Validation Time': val_time
        })
    return training_stats, training_loss_history, val_loss_history, parameter_tuned
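
A rough usage sketch for train_model; EmotionClassifier, train_loader and val_loader are hypothetical stand-ins for this project's actual model (which must expose a .dropout layer) and dataloaders:

model = EmotionClassifier()  # placeholder model
stats, train_hist, val_hist, tag = train_model(
    model,
    name='bert_intensity',
    train_dataloader=train_loader,      # batches of (input_ids, attention_masks,
    validation_dataloader=val_loader,   #   anger, fear, joy, sadness, vec, intensity)
    filepath='./checkpoints/',
    lr=2e-5,
    EPOCHS=3)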
def train(weight,
          model_num,
          model,
          train_dataloader,
          validation_dataloader,
          filepath='',
          lr=2e-5,
          EPOCHS=10,
          BATCH_SIZE=1):
    total_t0 = time.time()
    training_stats = []
    model = model.to(DEVICE)
    optimizer = AdamW(model.parameters(), lr=lr)

    weight = weight.to(DEVICE)
    loss_func = nn.NLLLoss(weight)
    loss_real = nn.NLLLoss()
    softmax = nn.LogSoftmax(dim=1)  # defined but unused below

    for epoch_num in range(EPOCHS):
        t0 = time.time()
        model.train()
        total_train_loss = 0
        for step_num, batch_data in enumerate(train_dataloader):
            input_ids, attention_masks, anger, fear, joy, sadness, vec, intensity = tuple(
                t for t in batch_data)

            # Ordinal targets: binarize the 0-3 intensity at thresholds 0, 1 and 2
            o1 = torch.tensor((intensity.numpy() > 0).astype(int))
            o2 = torch.tensor((intensity.numpy() > 1).astype(int))
            o3 = torch.tensor((intensity.numpy() > 2).astype(int))

            # elif/else so that o is always bound, matching the validation loop
            if model_num == 1:
                o = o1
            elif model_num == 2:
                o = o2
            else:
                o = o3

            input_ids = input_ids.to(DEVICE)
            attention_masks = attention_masks.to(DEVICE)
            anger = anger.to(DEVICE)
            fear = fear.to(DEVICE)
            joy = joy.to(DEVICE)
            sadness = sadness.to(DEVICE)
            intensity = intensity.to(DEVICE)
            vec = vec.to(DEVICE)
            o = o.to(DEVICE)

            model.zero_grad()

            probas = model(input_ids, attention_masks)
            loss = loss_func(probas, o)

            total_train_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            # scheduler.step()
            print('Epoch: ', epoch_num + 1)
            # len(train_dataloader) already counts batches; no BATCH_SIZE division
            print("\r" + "{0}/{1} loss: {2} ".format(
                step_num, len(train_dataloader),
                total_train_loss / (step_num + 1)))
        avg_train_loss = total_train_loss / len(train_dataloader)

        #model_save_name = filepath + '_epoch_' + str(epoch_num) + '_lr_' + str(lr) + '_' + str(model_num) + '.pt'
        #torch.save(model.state_dict(), model_save_name)
        training_time = format_time(time.time() - t0)
        model.eval()

        total_pearson = 0
        total_kappa = 0
        total_eval_loss_r = 0
        total_eval_loss_w = 0

        for batch_data in validation_dataloader:
            input_ids, attention_masks, anger, fear, joy, sadness, vec, intensity = tuple(
                t for t in batch_data)
            # Ordinal targets, as in the training loop above
            o1 = torch.tensor((intensity.numpy() > 0).astype(int))
            o2 = torch.tensor((intensity.numpy() > 1).astype(int))
            o3 = torch.tensor((intensity.numpy() > 2).astype(int))

            if model_num == 1:
                o = o1
            elif model_num == 2:
                o = o2
            else:
                o = o3
            input_ids = input_ids.to(DEVICE)
            attention_masks = attention_masks.to(DEVICE)
            anger = anger.to(DEVICE)
            fear = fear.to(DEVICE)
            joy = joy.to(DEVICE)
            sadness = sadness.to(DEVICE)
            intensity = intensity.to(DEVICE)
            vec = vec.to(DEVICE)
            o = o.to(DEVICE)

            with torch.no_grad():
                probas = model(input_ids, attention_masks)
                output = torch.max(probas, 1)[1]

                lossr = loss_real(probas, o)
                lossw = loss_func(probas, o)
                # Accumulate the validation loss.
                total_eval_loss_r += lossr.item()
                total_eval_loss_w += lossw.item()

                output = output.detach().cpu()
                o = o.to('cpu')

                # Compute Pearson correlation and kappa for this batch and
                # accumulate them over all batches.
                pear, _, kappa, _ = evaluate_PerEmotion(o, output)

                total_pearson += pear
                total_kappa += kappa

        # Average the metrics over the whole validation run.
        avg_pearson = total_pearson / len(validation_dataloader)
        avg_kappa = total_kappa / len(validation_dataloader)

        # Calculate the average loss over all of the batches.
        avg_val_loss_r = total_eval_loss_r / len(validation_dataloader)
        avg_val_loss_w = total_eval_loss_w / len(validation_dataloader)

        val_time = format_time(time.time() - t0)

        # Record all statistics from this epoch.
        training_stats.append({
            'epoch': epoch_num + 1,
            f'Training Loss on ordinal {model_num}': avg_train_loss,
            f'Valid. Loss on ordinal {model_num}, real': avg_val_loss_r,
            f'Valid. Loss on ordinal {model_num}, weighted': avg_val_loss_w,
            f'Pearson on ordinal {model_num}': avg_pearson,
            f'Kappa on ordinal {model_num}': avg_kappa,
            'Learning Rate': lr,
            'Training Time': training_time,
            'Validation Time': val_time
        })

        print(training_stats)

    return training_stats, model
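
train decomposes the 0-3 intensity label into three binary tasks: model k learns to predict whether intensity > k - 1. A minimal, hypothetical sketch of combining the three trained models at inference time (assuming their binary predictions are monotonically consistent):

def predict_intensity(models, input_ids, attention_masks):
    # models = [model_1, model_2, model_3] as returned by train(weight, k, ...)
    with torch.no_grad():
        # Each model emits log-probabilities of shape (batch, 2); class 1 means
        # "intensity exceeds this model's threshold".
        votes = [torch.max(m(input_ids, attention_masks), 1)[1] for m in models]
    # The predicted intensity is the number of thresholds exceeded.
    return votes[0] + votes[1] + votes[2]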