Example #1
def from_to_telegram(from_entity, to_entity, limit):
    # Collect the messages to forward (only ids above 570), oldest first.
    reverse_messages = []
    for msg in client.get_messages(from_entity, limit=limit):
        if msg.id > 570:
            reverse_messages.append(msg)
    reverse_messages.reverse()

    for message in reverse_messages:
        if not isinstance(message, MessageService):
            if hasattr(message, 'media'):
                if hasattr(message.media, 'webpage'):
                    # Web-page previews: resend only the text and let Telegram rebuild the preview.
                    client.send_message(to_entity, message=Filter.filter_message(message.message),
                                        reply_to=None, parse_mode='md', link_preview=True,
                                        file=None, force_document=False)
                else:
                    # Any other media: reattach the original media object.
                    client.send_message(to_entity, message=Filter.filter_message(message.message),
                                        reply_to=None, parse_mode='md', link_preview=True,
                                        file=message.media, force_document=False)
                print("Message id: %s Forwarded." % message.id)
        else:
            print("-------- Message service, skipped.")
Example #2
    def __init__(self):

        self.seed = args.seed
        np.random.seed(self.seed)

        # Init gym env and set the env seed
        self.env = gym.make(args.env)
        self.env.seed(self.seed)
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]
        self.max_action = int(self.env.action_space.high[0])

        # Init parameters
        self._init_parameters()

        self.filter = Filter(self.state_dim)
        self.policy = Policy(self.state_dim, self.action_dim, args)
        self.noise = Noise(self.policy.w_policy.size, args)
        self.ddpg = DDPG(self.state_dim, self.action_dim, self.max_action, args)
Example #3
File: ars.py Project: marsXyr/GESRL
    def __init__(self):

        self.seed = args.seed
        np.random.seed(self.seed)

        # Init gym env and set the env seed
        self.env = gym.make(args.env)
        self.env.seed(self.seed)
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]

        # Init parameters
        self._init_parameters()

        # Init filter, normalizes the input states by tracking the mean and std of states.
        self.filter = Filter(self.state_dim)
        # Init policy, we use linear policy here
        self.policy = Policy(self.state_dim, self.action_dim, args)
        # Init the noise generator
        self.noise = Noise(self.policy.w_policy.shape)
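
The comment in this example says the Filter normalizes input states by tracking their running mean and standard deviation. A minimal sketch of a state-normalizing Filter with the push/__call__ interface used in Examples #2, #3, #5 and #11 (an illustration of the idea, not the actual GESRL implementation):

import numpy as np

class Filter:
    """Online state normalizer using Welford's running mean/variance."""

    def __init__(self, state_dim):
        self.n = 0
        self.mean = np.zeros(state_dim)
        self.m2 = np.zeros(state_dim)   # running sum of squared deviations

    def push(self, state):
        # Fold one observed state into the running statistics.
        self.n += 1
        delta = state - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (state - self.mean)

    def __call__(self, state):
        # Normalize a state with the statistics gathered so far.
        std = np.sqrt(self.m2 / max(self.n - 1, 1)) + 1e-8
        return (state - self.mean) / std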
Example #4
def questao_10():
    # -----------------------------------------------------------
    # Exercise 10
    # -----------------------------------------------------------
    N = 99
    # Impulse response
    h = [0, 1, 2, 3, 4, 4, 3, 2, 1, 0]

    channel = Filter(h=h)
    bin_gen = BinaryHardLimiter()

    channel_eq = ChannelEqualization(bin_gen,
                                     channel,
                                     N=N,
                                     input_delay=int((len(h) + N + 1) / 2),
                                     noise=GaussianNoise(std=np.sqrt(1e-2)))

    mu_max = 1 / (N + 1)
    E_hist, W_hist = lms(channel_eq,
                         0.006 * mu_max,
                         N + 1,
                         max_runs=50,
                         max_iter=50000)

    b_adap = np.mean(W_hist, axis=0)[-1, :].conj()
    mse = np.mean(np.abs(E_hist)**2, axis=0)

    plt.figure()
    plt.plot(10 * np.log10(mse))
    plt.xlabel('iteração')
    plt.ylabel('MSE, dB')
    # plt.savefig('ex10-mse.pdf', dpi=300, bbox_inches='tight')
    plt.show()

    w_adap, h_adap = signal.freqz(b_adap)
    w_0, h_0 = signal.freqz(h)

    plt.figure()
    plt.plot(w_0 / np.pi, 20 * np.log10(np.abs(h_0)), label='sistema')
    plt.plot(w_adap / np.pi,
             20 * np.log10(np.abs(h_adap)),
             'r--',
             label='filtro adap.')
    plt.plot(w_0 / np.pi, 20 * np.log10(np.abs(h_0 * h_adap)), label='eq.')
    plt.xlabel('frequência, rad/s')
    plt.ylabel('magnitude, dB')
    plt.legend()
    plt.ylim([-100, 30])
    # plt.savefig('ex10-eq.pdf', dpi=300, bbox_inches='tight')

    plt.figure()
    plt.plot(
        signal.convolve(np.mean(np.mean(W_hist, axis=0)[2700:], axis=0), h))
    plt.xlabel('amostra')
    plt.ylabel('amplitude')
    plt.show()
Example #5
class GESRL:
    def __init__(self):

        self.seed = args.seed
        np.random.seed(self.seed)

        # Init gym env and set the env seed
        self.env = gym.make(args.env)
        self.env.seed(self.seed)
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]
        self.max_action = int(self.env.action_space.high[0])

        # Init parameters
        self._init_parameters()

        self.filter = Filter(self.state_dim)
        self.policy = Policy(self.state_dim, self.action_dim, args)
        self.noise = Noise(self.policy.w_policy.size, args)
        self.ddpg = DDPG(self.state_dim, self.action_dim, self.max_action,
                         args)

    def _init_parameters(self):

        self.log_dir = args.dir_path
        # The max steps per episode
        self.max_ep_len = args.max_ep_len
        self.epochs = args.epochs
        self.save_freq = args.save_freq
        self.start_epoch = args.start_epoch
        self.rl_train_steps = args.rl_train_steps
        self.pop_size = args.pop_size
        self.elite_size = args.elite_size
        # subspace dimension
        self.k = args.k

    def evaluate(self, eval=False):
        state, done, ep_reward, ep_len = self.env.reset(), False, 0.0, 0
        while not done and ep_len < self.max_ep_len:
            self.filter.push(state)
            state = self.filter(state)
            action = self.policy(state)
            next_state, reward, done, _ = self.env.step(action)
            if not eval:
                done = False if ep_len + 1 == self.max_ep_len else done
                self.ddpg.replay_buffer.store(
                    (state, next_state, action, reward, done))
            ep_reward += reward
            ep_len += 1
            state = next_state
        return ep_reward, ep_len

    def train(self):

        for epoch in range(self.epochs):
            surr_grads = []
            ddpg_grads = 0
            if epoch >= self.start_epoch:
                self.ddpg.actor.set_params(self.policy.w_policy)
                self.ddpg.actor_t.set_params(self.policy.w_policy)

                for step in range(self.rl_train_steps):
                    grad = self.ddpg.train()
                    ddpg_grads += grad
                    if step >= self.rl_train_steps - self.k:
                        surr_grads.append(grad.flatten())

                self.policy.update_by_ddpg(ddpg_grads / self.rl_train_steps)
                # if epoch % 50 == 0:
                #     self.ddpg.replay_buffer.buffer_flush()
                # self.policy.w_policy = self.ddpg.actor.get_params()

                self.noise.update(np.array(surr_grads).T)

            epsilons = self.noise.sample(
                self.pop_size)  # pop_size x policy_size, one perturbation per row

            pos_rewards, neg_rewards = [], []
            policy_weights = self.policy.w_policy  # action_dim x state_dim
            for epsilon in epsilons:
                self.policy.w_policy = policy_weights + epsilon.reshape(
                    self.policy.w_policy.shape)
                pos_reward, pos_len = self.evaluate()
                pos_rewards.append(pos_reward)

                self.policy.w_policy = policy_weights - epsilon.reshape(
                    self.policy.w_policy.shape)
                neg_reward, neg_len = self.evaluate()
                neg_rewards.append(neg_reward)
            self.policy.w_policy = policy_weights

            std_rewards = np.array(pos_rewards + neg_rewards).std()

            if self.elite_size != 0:
                scores = {
                    k: max(pos_reward, neg_reward)
                    for k, (
                        pos_reward,
                        neg_reward) in enumerate(zip(pos_rewards, neg_rewards))
                }
                sorted_scores = sorted(scores.keys(),
                                       key=lambda x: scores[x],
                                       reverse=True)[:self.elite_size]
                elite_pos_rewards = [pos_rewards[k] for k in sorted_scores]
                elite_neg_rewards = [neg_rewards[k] for k in sorted_scores]
                elite_epsilons = [epsilons[k] for k in sorted_scores]
                self.policy.update_by_ges(elite_pos_rewards, elite_neg_rewards,
                                          elite_epsilons, std_rewards)
            else:
                self.policy.update_by_ges(pos_rewards, neg_rewards, epsilons,
                                          std_rewards)

            if epoch % self.save_freq == 0:
                train_rewards = np.array(pos_rewards + neg_rewards)
                test_rewards = []
                for _ in range(10):
                    reward, _ = self.evaluate()
                    test_rewards.append(reward)
                test_rewards = np.array(test_rewards)

                np.savez(self.log_dir + '/policy_weights',
                         self.policy.w_policy)
                logz.log_tabular("Epoch", epoch)
                logz.log_tabular("AverageTrainReward", np.mean(train_rewards))
                logz.log_tabular("StdTrainRewards", np.std(train_rewards))
                logz.log_tabular("MaxTrainRewardRollout",
                                 np.max(train_rewards))
                logz.log_tabular("MinTrainRewardRollout",
                                 np.min(train_rewards))
                logz.log_tabular("AverageTestReward", np.mean(test_rewards))
                logz.log_tabular("StdTestRewards", np.std(test_rewards))
                logz.log_tabular("MaxTestRewardRollout", np.max(test_rewards))
                logz.log_tabular("MinTestRewardRollout", np.min(test_rewards))
                logz.dump_tabular()
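
Example #5's Noise object is refreshed each epoch with the last k DDPG gradients (self.noise.update(np.array(surr_grads).T)) and then sampled to build the population of perturbations. A sketch of a Guided-ES style noise generator that fits this interface; the mixing weight alpha and scale sigma are chosen arbitrarily here rather than taken from the GESRL source:

import numpy as np

class Noise:
    """Guided-ES style sampler: isotropic noise mixed with a low-rank
    subspace spanned by recent surrogate gradients."""

    def __init__(self, param_size, alpha=0.5, sigma=0.1):
        self.param_size = param_size
        self.alpha = alpha        # weight between full space and gradient subspace
        self.sigma = sigma
        self.U = None             # orthonormal basis of the gradient subspace

    def update(self, surr_grads):
        # surr_grads: param_size x k matrix of recent gradients.
        self.U, _ = np.linalg.qr(surr_grads)

    def sample(self, pop_size):
        # One perturbation per row (pop_size x param_size).
        eps = np.sqrt(self.alpha / self.param_size) * np.random.randn(pop_size, self.param_size)
        if self.U is not None:
            k = self.U.shape[1]
            eps += np.sqrt((1 - self.alpha) / k) * np.random.randn(pop_size, k) @ self.U.T
        return self.sigma * eps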
Example #6
    def __init__(self, filter_obj=None):
        if not filter_obj:
            filter_obj = Filter()
        self.filter_obj = filter_obj
        self.post2freq = {}
        self.resp2freq = {}
Example #7
        data = self.load(coarse_fp)
        self.post2freq, self.resp2freq = self.get_data_frequency(data)
        del data

        worker = Worker(coarse_fp, tgt_fp, self.global_preprocess)
        mp = MultiProcessor(worker, pid_num)
        mp.run()
        print("All Global Processes Done.")
        worker.merge_result(keep_pid_file=keep_pid_file)


if __name__ == '__main__':
    os.system("mkdir -p logging")
    pid_num = 25
    P = Preprocessor()
    filter_obj = Filter(logging_fp="./logging/filter_all.log")
    P = Preprocessor(filter_obj)
    src_fp, tgt_fp = 'test/Weibo_24_35.sample.10000', 'test/weibo_corpus'
    # src_fp, tgt_fp = '/home/xiaohe_li/0_data/Weibo_24_35.raw.out', '/home/xiaohe_li/0_data/weibo_whole.corpus'
    P.run_mp(src_fp, tgt_fp, pid_num, keep_pid_file=False)

    # data = P.load(src_fp)
    # data = P.run(data)
    # P.save('corpus/weibo_corpus', data)

    print("Begin Cut Sentences.")
    cutted_fp = tgt_fp + '.cutted'
    cutter = Cutter(method='pkuseg')
    worker = Worker(tgt_fp, cutted_fp, cutter.cut_data_line)
    mp = MultiProcessor(worker, pid_num)
    mp.run()
Example #8
    def __init__(self):
        with open('data/config.json') as config_file:
            self.config = json.load(config_file)
        self.filters_config = self.config["filters"]
        self.filter = Filter()
Example #9
def questao_25():
    # -----------------------------------------------------------
    # Exercise 25
    # -----------------------------------------------------------
    NFFT = 1024
    N = 19
    SNR = 20
    h = [.34 - .21 * 1j, .87 + .43 * 1j, .34 - .27 * 1j]
    qam = QAM()
    channel = Filter(h, [1.])
    data = ChannelEqualization(qam,
                               channel,
                               N=N,
                               input_delay=int((N + len(h)) / 2),
                               noise=GaussianNoise,
                               SNR=SNR)
    a = qam(100000)
    A = toeplitz(np.hstack([a[0], np.zeros(N)]), a)
    R = A.dot(A.T.conj()) / 100000
    trR = R.trace()
    mu_max = 1 / trR
    MSE, W = [], []
    for mu in (mu_max / 2, mu_max / 10, mu_max / 50):
        E_hist, W_hist = lms(data,
                             mu,
                             N + 1,
                             max_runs=50,
                             max_iter=5000,
                             dtype='complex128',
                             print_every=-1)

        MSE.append(np.mean(np.abs(E_hist)**2, axis=0))
        W.append(np.mean(W_hist, axis=0))
    plt.figure()
    for mse, name in zip(
            MSE,
        ['$\\mu_{\\max}/2$', '$\\mu_{\\max}/10$', '$\\mu_{\\max}/50$']):
        plt.plot(10 * np.log10(mse), label=name)
    plt.legend()
    plt.xlabel('Iteração')
    plt.ylabel('MSE (em dB)')
    # plt.savefig('ex25-mse-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')
    plt.show()

    b_adap = np.mean(W_hist, axis=0)[-1, :].conj()
    mse = np.mean(np.abs(E_hist)**2, axis=0)

    plt.figure()
    freqs = np.linspace(-1, 1, NFFT)
    plt.plot(freqs, 20 * np.log10(np.abs(fft(h, n=NFFT))), label='Canal')
    plt.plot(freqs,
             20 * np.log10(np.abs(fft(b_adap, n=NFFT))),
             'r--',
             label='Equalizador')
    plt.plot(freqs,
             20 * np.log10(np.abs(fft(np.convolve(b_adap, h), n=NFFT))),
             'y--',
             label='Canal equalizado')
    plt.xlim([-1, 1])
    plt.legend()
    plt.xlabel('Frequência normalizada')
    plt.ylabel('Magnitude (em dB)')
    plt.show()
    # plt.savefig('figs/ex25-freq-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')

    plt.figure()
    plt.plot(signal.convolve(b_adap, h))
    plt.xlabel('Amostra')
    plt.ylabel('Amplitude')
    plt.show()
    # plt.savefig('ex25-tempo-{}.pdf'.format(N+1), dpi=300, bbox_inches='tight')

    tx = qam(100)
    rx = signal.lfilter(h, [1.], tx)
    noise = GaussianNoise(std=np.sqrt(rx.var() / (2 * SNR)), complex=True)(100)
    rx += noise
    rx_eq = np.convolve(rx, b_adap)

    plt.figure()
    plt.plot(np.real(rx), np.imag(rx), 'o', label='Recebido')
    plt.plot(np.real(rx_eq), np.imag(rx_eq), 'o', label='Equalizado')
    plt.plot(1, 1, 'ro', label='Alvo')
    plt.plot(1, -1, 'ro')
    plt.plot(-1, 1, 'ro')
    plt.plot(-1, -1, 'ro')
    plt.legend()
    plt.xlabel('Real')
    plt.ylabel('Imaginário')
    plt.show()
Example #10
import os

import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.optimize import least_squares

from image_processing import processing
from sliding_window import sliding_window
from utils.calculate import get_linear_function
from utils.filter import Filter
from utils.low_pass_filter import LowPassFilter

f_cut = 100
freq = 100000

filter_left = Filter(f_cut=f_cut, freq=freq, num_data=3)
filter_right = Filter(f_cut=f_cut, freq=freq, num_data=3)
filter_deg = Filter(f_cut=f_cut, freq=freq, num_data=1)
filter_deg2 = LowPassFilter(0.75)

filter_coef_left = None
filter_coef_right = None

heading_deg = 0


def rint(point):
    return np.rint(point).astype(np.int32)


script_dir = os.path.dirname(__file__)
Example #11
File: ars.py Project: marsXyr/GESRL
class ARS:
    def __init__(self):

        self.seed = args.seed
        np.random.seed(self.seed)

        # Init gym env and set the env seed
        self.env = gym.make(args.env)
        self.env.seed(self.seed)
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]

        # Init parameters
        self._init_parameters()

        # Init filter, normalizes the input states by tracking the mean and std of states.
        self.filter = Filter(self.state_dim)
        # Init policy, we use linear policy here
        self.policy = Policy(self.state_dim, self.action_dim, args)
        # Init the noise generator
        self.noise = Noise(self.policy.w_policy.shape)

    def _init_parameters(self):

        self.log_dir = args.dir_path
        # The max steps per episode
        self.max_ep_len = args.max_ep_len
        self.epochs = args.epochs
        self.save_freq = args.save_freq
        self.pop_size = args.pop_size
        self.elite_size = args.elite_size
        self.noise_std = args.noise_std

    def evaluate(self):
        state, done, ep_reward, ep_len = self.env.reset(), False, 0.0, 0
        while not done and ep_len < self.max_ep_len:
            self.filter.push(state)
            state = self.filter(state)
            action = self.policy(state)
            state, reward, done, _ = self.env.step(action)
            ep_reward += reward
            ep_len += 1
        return ep_reward, ep_len

    def train(self):

        for epoch in range(self.epochs):
            # Sample noises from the noise generator.
            epsilons = self.noise.sample(self.pop_size)

            pos_rewards, neg_rewards = [], []
            policy_weights = self.policy.w_policy
            # Generate 2 * pop_size policies and rollouts.
            for epsilon in epsilons:
                self.policy.w_policy = policy_weights + self.noise_std * epsilon
                pos_reward, pos_len = self.evaluate()
                pos_rewards.append(pos_reward)

                self.policy.w_policy = policy_weights - self.noise_std * epsilon
                neg_reward, neg_len = self.evaluate()
                neg_rewards.append(neg_reward)
            self.policy.w_policy = policy_weights

            std_rewards = np.array(pos_rewards + neg_rewards).std()

            # ARS update
            if self.elite_size != 0:
                scores = {
                    k: max(pos_reward, neg_reward)
                    for k, (
                        pos_reward,
                        neg_reward) in enumerate(zip(pos_rewards, neg_rewards))
                }
                sorted_scores = sorted(scores.keys(),
                                       key=lambda x: scores[x],
                                       reverse=True)[:self.elite_size]
                elite_pos_rewards = [pos_rewards[k] for k in sorted_scores]
                elite_neg_rewards = [neg_rewards[k] for k in sorted_scores]
                elite_epsilons = [epsilons[k] for k in sorted_scores]
                self.policy.update(elite_pos_rewards, elite_neg_rewards,
                                   elite_epsilons, std_rewards)
            else:

                self.policy.update(pos_rewards, neg_rewards, epsilons,
                                   std_rewards)

            # Save policy and log the information
            if epoch % self.save_freq == 0:
                train_rewards = np.array(pos_rewards + neg_rewards)
                test_rewards = []
                for _ in range(10):
                    reward, _ = self.evaluate()
                    test_rewards.append(reward)
                test_rewards = np.array(test_rewards)

                np.savez(self.log_dir + '/policy_weights',
                         self.policy.w_policy)
                logz.log_tabular("Epoch", epoch)
                logz.log_tabular("AverageTrainReward", np.mean(train_rewards))
                logz.log_tabular("StdTrainRewards", np.std(train_rewards))
                logz.log_tabular("MaxTrainRewardRollout",
                                 np.max(train_rewards))
                logz.log_tabular("MinTrainRewardRollout",
                                 np.min(train_rewards))
                logz.log_tabular("AverageTestReward", np.mean(test_rewards))
                logz.log_tabular("StdTestRewards", np.std(test_rewards))
                logz.log_tabular("MaxTestRewardRollout", np.max(test_rewards))
                logz.log_tabular("MinTestRewardRollout", np.min(test_rewards))
                logz.dump_tabular()
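
In both training loops above, the rollout rewards and their perturbations are handed to policy.update (ARS, Example #11) or policy.update_by_ges (GESRL, Example #5), which are not part of this listing. For orientation, a sketch of the standard ARS-style step such an update presumably performs; the learning rate lr and the exact normalization are assumptions:

import numpy as np

def ars_step(w_policy, pos_rewards, neg_rewards, epsilons, std_rewards, lr=0.02):
    # Move the weights along the reward-weighted sum of perturbations,
    # scaled by the number of directions and the reward standard deviation.
    step = np.zeros_like(w_policy)
    for r_pos, r_neg, eps in zip(pos_rewards, neg_rewards, epsilons):
        step += (r_pos - r_neg) * np.reshape(eps, w_policy.shape)
    return w_policy + lr / (len(pos_rewards) * (std_rewards + 1e-8)) * step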