Example #1
    def __init__(self, root_dir, source_limit, transform=None):

        self.world = World()

        self.speakers = []
        for d_name in os.listdir(root_dir):
            if os.path.isdir(os.path.join(root_dir, d_name)):
                self.speakers.append(d_name)
        print(self.speakers)

        self.data = []
        self.label = []
        for i, d_name in enumerate(self.speakers):
            data_dir = os.path.join(root_dir, d_name)
            pickle_path = os.path.join(data_dir, d_name + "_mcep.pickle")
            if os.path.isfile(pickle_path):
                with open(pickle_path, mode="rb") as data:
                    mceps = pickle.load(data)
                    mceps = mceps[:source_limit]
                    self.data.extend(mceps)
                    self.label.extend(np.ones(len(mceps)) * i)
                print("[{}] mcep loaded.".format(d_name))
            else:
                mceps = []
                f0s = []
                for f in tqdm_notebook(os.listdir(data_dir)):
                    if ".wav" not in f:
                        continue
                    file_path = os.path.join(data_dir, f)
                    wav, _ = librosa.load(file_path, sr=hparams.fs)
                    if len(wav) <= 10:
                        continue  # skip effectively empty clips
                    wav, _ = librosa.effects.trim(wav)
                    wav = wav.astype(np.double)
                    f0, spec, ap = self.world.analyze(wav)
                    mcep = self.world.mcep_from_spec(spec)
                    mcep = mcep.reshape(mcep.shape[0], mcep.shape[1], 1)
                    if mcep.shape[0] < 128:
                        continue  # skip clips shorter than 128 MCEP frames
                    mceps.append(mcep)
                    f0s.append(f0)
                # mceps = mceps[:source_limit]
                self.data.extend(mceps)
                self.label.extend(np.ones(len(mceps)) * i)
                with open(pickle_path, mode='wb') as pickle_file:
                    pickle.dump(mceps, pickle_file)
                log_f0s_mean, log_f0s_std = logf0_statistics(f0s)
                mceps_mean, mceps_std = mcep_statistics(mceps)
                np.savez(os.path.join(data_dir, d_name + "_norm.npz"),
                         log_f0s_mean=log_f0s_mean,
                         log_f0s_std=log_f0s_std,
                         mceps_mean=mceps_mean,
                         mceps_std=mceps_std)
                print("[{}] voices converted.".format(d_name))

        self.transform = transform
        self.converter = Converter(root_dir, self.speakers)
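
The snippet only shows __init__; a minimal sketch of the rest of the torch.utils.data.Dataset protocol, assuming self.data holds per-utterance MCEP arrays and self.label the matching speaker indices (the transform handling is illustrative):

    def __len__(self):
        # one entry per loaded MCEP segment
        return len(self.data)

    def __getitem__(self, idx):
        mcep, speaker = self.data[idx], self.label[idx]
        if self.transform:
            mcep = self.transform(mcep)  # optional transform from __init__
        return mcep, speaker
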
Example #2
    def addTweet(self, tweetStatus) -> None:
        # run SentimentAnalysis, score the tweet, append to tweet data
        sentimentScore: int = self._scoreTweet(tweetStatus)

        # add tweet to database - running the DAO method to add to the database
        screenName: str = tweetStatus['user']['screen_name']
        tweetID: str = tweetStatus['id']
        tweetText: str = tweetStatus['text']
        createdAt: str = Converter.convertDate(tweetStatus['created_at'])

        # get crypto ticker from tweet
        cryptoTicker: str = self._extractTicker(tweetText)

        if not cryptoTicker:
            return  # no ticker found; skip this tweet

        tweetDAO = InfluencersTweetDAO(self.settings.value("DB_PATH", ''),
                                       self.settings.value("DB_NAME", ''))
        tweetDAO.add_influencer_tweet(
            screenName, tweetID, tweetText, createdAt, cryptoTicker, sentimentScore
        )

        tweet = {
            "screenName": screenName,
            "tweetID": tweetID,
            "tweetText": tweetText,
            "createdAt": createdAt,
            "cryptoTicker": cryptoTicker,
            "sentimentScore": sentimentScore
        }
        model: AppModel = cast(AppModel, self.model)
        model.addTweet(tweet)
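
addTweet relies on the private helper _extractTicker, which is not shown; a hypothetical sketch, assuming tickers appear as cashtags such as $BTC and that an empty string means "no ticker found" (matching the early return above):

    # hypothetical sketch of a method on the same class (requires "import re")
    def _extractTicker(self, tweetText: str) -> str:
        # return the first $-prefixed cashtag, e.g. "$BTC" -> "BTC"
        match = re.search(r'\$([A-Za-z]{2,6})\b', tweetText)
        return match.group(1).upper() if match else ''
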
Example #3
def main():
    args = parse_training_args("ESRGAN")
    epochs = args.epochs
    load_path = args.load
    init_path = args.init
    out_path = args.out
    cuda = args.cuda
    device = torch.device(
        'cuda' if torch.cuda.is_available() and cuda else 'cpu')

    g_net = DenseGenerator().to(device)
    g_criterion = PerceptualLoss(
        feature_extractor=TruncatedVgg(with_activation_layer=False),
        content_criterion=L1Loss(),
        adversarial_criterion=BCEWithLogitsLoss(),
    ).to(device)
    g_optimizer = Adam(params=filter(lambda p: p.requires_grad,
                                     g_net.parameters()),
                       lr=1e-4)
    g_scheduler = ReduceLROnPlateau(optimizer=g_optimizer,
                                    factor=0.5,
                                    patience=3,
                                    verbose=True)

    d_net = Discriminator().to(device)
    d_criterion = DiscriminatorLoss(criterion=BCEWithLogitsLoss()).to(device)
    d_optimizer = Adam(params=filter(lambda p: p.requires_grad,
                                     d_net.parameters()),
                       lr=1e-4)
    d_scheduler = ReduceLROnPlateau(optimizer=d_optimizer,
                                    factor=0.5,
                                    patience=3,
                                    verbose=True)

    converter = Converter()
    dataset = ImageNetDataset(json_path='data/train.json', converter=converter)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=4,
                             num_workers=4,
                             pin_memory=True,
                             shuffle=True)

    trainer = ReGANTrainer(g_net=g_net,
                           g_criterion=g_criterion,
                           g_optimizer=g_optimizer,
                           g_scheduler=g_scheduler,
                           d_net=d_net,
                           d_criterion=d_criterion,
                           d_optimizer=d_optimizer,
                           d_scheduler=d_scheduler,
                           data_loader=data_loader,
                           device=device)

    if init_path:
        trainer.load_pretrained_generator(init_path)

    if load_path:
        trainer.load(load_path)

    trainer.train(max_epochs=epochs, save_path=out_path)
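
parse_training_args is project-specific and not shown; a plausible argparse sketch matching the attributes read in main() above — every flag name and default here is an assumption:

import argparse

def parse_training_args(name):
    # assumed interface: each flag is inferred from the attribute accesses
    # in main() above, not taken from the real project
    parser = argparse.ArgumentParser(description=f'{name} training')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--load', type=str, default='',
                        help='checkpoint to resume training from')
    parser.add_argument('--init', type=str, default='',
                        help='pretrained generator weights (ESRGAN only)')
    parser.add_argument('--out', type=str, default='checkpoints/')
    parser.add_argument('--cuda', action='store_true')
    return parser.parse_args()
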
Example #4
def convert(message: telebot.types.Message):
    try:
        values = message.text.split(' ')

        if len(values) != 3:
            raise ConvertionException('Wrong number of parameters')

        quote, base, amount = values
        total_base = Converter.convert(quote, base, amount)
    except ConvertionException as e:
        bot.reply_to(message, f'User error\n{e}')
    except Exception as e:
        bot.reply_to(message, f'Could not process the command\n{e}')
    else:
        text = f'The price of {amount} {quote} in {base} is {total_base}'
        bot.send_message(message.chat.id, text)
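
With pyTelegramBotAPI, a handler like convert is typically registered with a decorator; a sketch of the wiring assumed above (the token variable and the plain-text trigger are assumptions):

bot = telebot.TeleBot(TOKEN)  # TOKEN assumed to be defined elsewhere

@bot.message_handler(content_types=['text'])  # plain "quote base amount" messages
def convert(message: telebot.types.Message):
    ...  # body as shown above

bot.polling()
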
Example #5
def main():
    args = parse_training_args("SRResNet")
    epochs = args.epochs
    load_path = args.load
    out_path = args.out
    cuda = args.cuda
    device = torch.device(
        'cuda' if torch.cuda.is_available() and cuda else 'cpu')

    net = Generator().to(device)
    criterion = MSELoss().to(device)
    optimizer = Adam(params=filter(lambda p: p.requires_grad,
                                   net.parameters()),
                     lr=1e-4)
    scheduler = ReduceLROnPlateau(optimizer,
                                  patience=3,
                                  factor=0.5,
                                  verbose=True)

    dataset = ImageNetDataset(json_path='data/train.json',
                              converter=Converter())
    data_loader = DataLoader(dataset=dataset,
                             batch_size=16,
                             num_workers=4,
                             pin_memory=True,
                             shuffle=True)

    trainer = NetTrainer(net=net,
                         criterion=criterion,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         data_loader=data_loader,
                         device=device)

    if load_path:
        trainer.load(load_path)

    trainer.train(max_epochs=epochs, save_path=out_path)
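
Both training scripts above are plain functions; they would normally be launched through the standard entry-point guard:

if __name__ == '__main__':
    main()
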
Example #6
def weight_init(module):
    # DCGAN-style initialization: N(0, 0.02) for convolutions,
    # N(1, 0.02) for batch-norm weights with zero bias
    class_name = module.__class__.__name__
    if class_name.find('Conv') != -1:
        module.weight.data.normal_(0, 0.02)
    if class_name.find('BatchNorm') != -1:
        module.weight.data.normal_(1, 0.02)
        module.bias.data.fill_(0)


crnn.apply(weight_init)

loss_function = CTCLoss(zero_infinity=True)
loss_function = loss_function.cuda()
optimizer = Adadelta(crnn.parameters())
converter = Converter(option.alphabet)
print_every = 100
total_loss = 0.0


def validation():
    print('start validation...')
    crnn.eval()
    total_loss = 0.0
    n_correct = 0
    for i, (input, label) in enumerate(validationset_dataloader):
        if i == len(validationset_dataloader) - 1:
            continue  # skip the (possibly partial) last batch
        if i == 9:
            break  # only validate on the first few batches
        label_tmp = label
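
The globals above (loss_function, optimizer, converter, print_every, total_loss) set up CTC training, and the validation loop is cut off mid-body. A sketch of the training step those globals imply, assuming converter.encode maps text labels to (targets, target_lengths) as CRNN label converters usually do (torch is assumed to be imported in this module):

def train_step(images, texts):
    # hypothetical helper: one CTC training iteration
    crnn.train()
    preds = crnn(images.cuda())                        # (T, batch, n_classes)
    preds = preds.log_softmax(2)                       # CTCLoss expects log-probs
    targets, target_lengths = converter.encode(texts)  # assumed encoder API
    input_lengths = torch.full((preds.size(1),), preds.size(0),
                               dtype=torch.long)
    loss = loss_function(preds, targets, input_lengths, target_lengths)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
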
Example #7
def main(explore, exploit, trace):
    agent = Agent(pos=(0, 0))               # create an agent at initial position (0,0)
    env = Environment()                     # create an environment
    convert = Converter(env)

    n_a = env.action_space_n                # get the size of action space
    n_s = env.state_space_n                 # get the size of state space

    q_table = np.zeros([n_s, n_a])

    if trace == 2:
        e_table = np.zeros([n_s, n_a])          # initialize the eligibility trace
    else:
        e_table = None
    if explore == 1:
        ex_method = "greedy"
    else:
        ex_method = "softmax"

    n_episode = 1000
    n_timestep = 500

    window = 200
    cleaning_rate = []
    returns = deque(maxlen=window)
    avg_rt_count = []


    # for each episode
    for i_episode in range(n_episode):
        s = convert.state2tile(env.reset())
        a = agent.get_action(s, q_table, method=ex_method)

        # for each epoch
        clean_rate = 0
        for t in range(n_timestep):

            # Act: take a step and receive (new state, reward, termination flag, additional information)
            s_prime, reward, done, info = env.step(a)
            agent.store_transition(s, a, reward)

            # only print info on the last episode, to avoid flooding the output
            if i_episode == n_episode - 1:
                env.display()
                print(info)

            # Select an action
            '''We need to pass the method explicitly: {"softmax", "greedy"}'''
            good_acts = []
            (_x, _y) = (s_prime[0], s_prime[1])
            if (_x - 1, _y) in env.trashes:
                good_acts.append(0)
            if (_x + 1, _y) in env.trashes:
                good_acts.append(1)
            if (_x, _y - 1) in env.trashes:
                good_acts.append(2)
            if (_x, _y + 1) in env.trashes:
                good_acts.append(3)
            s_prime = convert.state2tile(s_prime)
            a_prime = agent.get_action(s_prime, q_table, good_acts=good_acts, method=ex_method)

            # Update the Q value table
            '''
            The update method is chosen implicitly according to
            the number of arguments passed
            '''
            if exploit == 1:
                agent.update(q_table, s, a, reward, s_prime, a_prime=None, e_table=e_table)
            else:
                agent.update(q_table, s, a, reward, s_prime, a_prime, e_table=e_table)

            # Transition to new state
            s = s_prime
            a = a_prime

            if done:
                reward_0 = agent.discounted_return()[0]
                clean_rate = (env.nb_trashes - len(env.trashes)) / env.nb_trashes
                returns.append(reward_0)
                avg_rt_count.append(np.average(returns))
                print("Episode: {0}\t Nb_Steps{1:>4}\t Epsilon: {2:.3f}\t Tau: {3:.3f}\t Clean Rate: {4:.3f}\t Discounted_return: {5:.3f}\t".format(
                    i_episode, t + 1, agent.epsilon, agent.tau, clean_rate, reward_0))
                # print(info)
                break

        agent.ep_rs.clear()
        agent.ep_obs.clear()
        agent.ep_as.clear()

        agent.epsilon = agent.epsilon * agent.epsilon_decay
        agent.tau = agent.init_tau + i_episode * agent.tau_inc
        cleaning_rate.append(clean_rate)

    plt.ioff()
    fig = plt.figure(figsize=(7, 9))
    ax1 = fig.add_subplot(3, 1, 1)
    ax1.scatter(range(n_episode), cleaning_rate, color='r', label="Cleaning rate")
    ax1.legend()
    ax2 = fig.add_subplot(3, 1, 2)
    moving_avg = rolling_mean(cleaning_rate, n=window)
    ax2.plot(range(len(moving_avg)), moving_avg, color='r', label="Rolling average cleaning rate")
    ax2.legend()
    ax3 = fig.add_subplot(3, 1, 3)
    ax3.plot(range(len(avg_rt_count)), avg_rt_count, color='r', label="Rolling average discounted return")
    ax3.legend()
    plt.show()
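
rolling_mean is imported from elsewhere in the project and not shown; a standard moving-average sketch consistent with how it is plotted above (the implementation is an assumption):

def rolling_mean(values, n):
    # simple moving average over a window of n points (assumed behaviour)
    values = np.asarray(values, dtype=float)
    if len(values) < n:
        return values
    return np.convolve(values, np.ones(n) / n, mode='valid')
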
Example #8
"""
Author: Sulley
Date: 2020.2.29
"""

import chardet
import codecs
import os
import sys
import csv
import xlrd
import docx
import jieba
import itertools, string
from pypinyin import pinyin, lazy_pinyin, Style
from PyQt5.Qt import *
from utils import Converter, Counter, Extractor, Corpus, Lexicon
from window import Window, EmittingStream

if __name__ == '__main__':
    converter = Converter()
    counter = Counter(converter)
    extractor = Extractor(converter)
    corpus = Corpus()
    lexicon = Lexicon()

    app = QApplication(sys.argv)
    exe = Window(converter, counter, extractor, corpus, lexicon)
    sys.exit(app.exec_())
Example #9
from PIL import Image
from torchvision import transforms
from crnn import CRNN
import torch
from utils import Converter

print('load input image...')
image = Image.open('demo_1.png').convert('L')
transform = transforms.Compose(
    [transforms.Resize((32, 100)),
     transforms.ToTensor()])
image = transform(image)
image = image.unsqueeze(0)
image = image.cuda()

print('load trained model...')
crnn = CRNN(1, 38, 256)  # 1 input channel, 38 classes (37 chars + CTC blank), 256 hidden units
crnn = crnn.cuda()
crnn.load_state_dict(torch.load('trained_model/crnn.pth'))

crnn.eval()
predicted_label = crnn(image)

_, predicted_label = predicted_label.max(2)
predicted_label = predicted_label.transpose(1, 0).contiguous().view(-1)
converter = Converter('0123456789abcdefghijklmnopqrstuvwxyz*')
predicted_length = [predicted_label.size(0)]
predicted_label = converter.decode(predicted_label,
                                   predicted_length,
                                   raw=False)
print('predicted label: %s' % predicted_label)
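
The raw=False flag suggests the converter applies standard CTC greedy decoding; a sketch of what that step typically does, assuming index 0 is the CTC blank (the Converter's real internals may differ):

def ctc_greedy_decode(indices, alphabet, blank=0):
    # collapse repeated indices, then drop blanks (assumed decode behaviour)
    chars = []
    previous = blank
    for idx in indices:
        if idx != previous and idx != blank:
            chars.append(alphabet[idx - 1])  # class i maps to alphabet[i - 1]
        previous = idx
    return ''.join(chars)
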
Example #10
import pygame, sys
from pygame.locals import *
from environment import Environment
from agent import Agent, Action
from main import *
import numpy as np
from utils import Converter

######################################################################################
#      teaching the agent to clean up trash properly with reinforcement learning     #
######################################################################################

agent = Agent(pos=(0, 0))  # create a new agent
env = Environment()  # create the environment
convert = Converter(env)

facteur = 50  # pixels per grid cell
agent_pos = env.position  # get the agent's position
agent_pos = (agent_pos[0] * facteur, agent_pos[1] * facteur)  # scale to pixels

n_a = env.action_space_n  # get the action space size
n_s = env.state_space_n  # get the state space size

q_table = np.zeros([n_s, n_a])  # init Q table
e_table = np.zeros([n_s, n_a])  # init eligibility traces

# cleaning rate for each episode
clean_rate = []
crashes = []
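
The facteur scaling maps grid coordinates to pixel coordinates; a sketch of the matching pygame rendering call, assuming a screen surface and env.width/env.height attributes (all names here are illustrative):

screen = pygame.display.set_mode((env.width * facteur, env.height * facteur))

def draw_agent(screen, grid_pos):
    # draw the agent as a filled square inside its grid cell
    rect = pygame.Rect(grid_pos[0] * facteur, grid_pos[1] * facteur,
                       facteur, facteur)
    pygame.draw.rect(screen, (0, 128, 255), rect)
    pygame.display.flip()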