Example #1
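Trains a DQNAgent on MountainCar: live() runs the training episodes and returns the per-episode rewards, which are saved with np.save; the trained agent is written to a .pt file.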
        '''
            ...
            discount=0.99,
            target_freq=10,
            verbose=True,
            print_every=10)
        '''

        agent = DQNAgent(action_set=[0, 1, 2],
                         reward_function=mountain_car_reward_function,
                         feature_extractor=MountainCarIdentityFeature(),
                         hidden_dims=[50, 50],
                         learning_rate=5e-4,
                         buffer_size=50000,
                         batch_size=64,
                         num_batches=100,
                         starts_learning=5000,
                         final_epsilon=0.02,
                         discount=0.99,
                         target_freq=10,
                         verbose=True,
                         print_every=10)

        _, _, rewards = live(agent=agent,
                             environment=env,
                             num_episodes=episodes,
                             max_timesteps=200,
                             verbose=True,
                             print_every=50)

        np.save(os.path.join(reward_path, file_name), rewards)
        agent.save(path=os.path.join(agent_path, file_name + '.pt'))
Example #2
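Constructor of the atropine TV front end (apparently a QStackedWidget subclass): it wires the guide, callsign and icon managers together, creates source_hdhr video sources from the configured HDHomeRun lineups, stacks the live-view, guide and blank widgets, installs a Qt stylesheet with per-category colours read from categories.xml, and schedules start(). Here live is the project's live-view widget module rather than a standalone live() function.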
    def __init__(self, options):
        super(atropine, self).__init__()
        self.vchannels = collections.OrderedDict()
        self.fullscreen = options.fullscreen
        self.no_escape = options.no_escape
        self.max_brightness = options.max_brightness

        gm = guide_manager.guide_manager(self, options)
        cm = callsign_manager.callsign_manager(self, options)
        im = icon_manager.icon_manager(cm, options)

        gm.new_guide.connect(cm.new_guide)
        gm.new_guide.connect(self.guide_update)

        self.vchannel = None
        self.source = None
        self.resume_source = None
        self.channel_file = options.channel_file

        self.video = video_vlc.video_vlc()
        self.sources = dict()
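        # one source_hdhr instance per configured HDHomeRun lineup, or a single default source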
        if len(options.hdhr_lineup_id):
            for lineup, tuners in options.hdhr_lineup_id.iteritems():
                self.sources[lineup] = source_hdhr.source_hdhr(
                    self.video, tuners)
        else:
            self.sources[''] = source_hdhr.source_hdhr(self.video)
        self.live = live.live(self, self.video, im, self.vchannels)
        self.guide = guide.guide_widget(self, options, self.video, im,
                                        self.vchannels)
        self.blank = Qt.QWidget(self)

        self.addWidget(self.live)
        self.addWidget(self.guide)
        self.addWidget(self.blank)

        self.video.setParent(self)
        self.video.show()

        self.resume_widget = None
        self.setCurrentWidget(self.live)

        self.live.clicked.connect(
            lambda w=self.guide: self.setCurrentWidget(w))
        self.guide.done.connect(lambda w=self.live: self.setCurrentWidget(w))

        self.paused_start = options.paused
        Qt.QTimer.singleShot(0, self.start)

        ss = """
            QWidget {
                border: none;
                background-color: rgb(60, 75, 90);
            }
            QLabel {
                font-family: sans-serif;
                font: 24pt;
                color: white;
            }
            video, video_proxy, guide_widget now_widget, time_header_widget {
                background-color: black
            }
            guide_widget station_logo_widget {
                background-color: rgb(150, 170, 190);
            }
            guide_widget program_info_widget {
                background-color: rgb(30, 30, 30);
                border: 2px solid black;
            }
            guide_widget program_info_widget QWidget {
                background-color: rgb(30, 30, 30);
            }
            guide_header_widget {
                background-color: darkblue;
            }
            station_info_guide_large QWidget {
                background-color: rgb(90, 105, 120);
            }
            info_widget, info_widget QWidget {
                background: none;
            }
            program_label {
                padding: 4px;
                border: 2px solid black;
            }
            program_label:focus {
                border: 6px solid white;
            }
            time_header_widget, now_widget {
                padding: 8px;
            }
            station_info_widget {
                padding: 6px;
            }
            osd_widget {
                margin: 50px;
            }
            guide_widget > station_info_widget {
                padding: 4px;
                font-size: 20pt;
            }
            QProgressBar {
                border: none;
                color: red;
            }
            QProgressBar::chunk {
                background-color: red;
            }
        """
        # https://raw.githubusercontent.com/MythTV/mythtv/master/mythtv/themes/default/categories.xml
        cat_file = os.path.join(__location__, 'categories.xml')
        cc = categories.category_colors(cat_file)
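        # append one QLabel background rule per category colour, clamping its brightness to max_brightness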
        for key, value in cc.iteritems():
            c = Qt.QColor(*value)
            c = Qt.QColor.fromHsvF(c.hsvHueF(), c.hsvSaturationF(),
                                   min(c.valueF(), self.max_brightness))
            #c = Qt.QColor.fromHslF(c.hslHueF(), c.hslSaturationF(), min(c.lightnessF(), self.max_brightness))
            c = str(c.getRgb()[0:3])
            ss += 'QLabel[category="%s"] { background-color: rgb%s; }\n' % (
                key, c)

        self.setStyleSheet(ss)
Example #3
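Loads dlib's frontal face detector and the 68-point landmark predictor, runs calibration() to obtain a mean and standard deviation, then passes them to live() and prints each returned data/result pair.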
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import warnings
import math
from sklearn import preprocessing
import dlib  # needed for get_frontal_face_detector / shape_predictor below

p = "./models/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

from calibration import calibration
from live import live

# Run calibration
mean, std = calibration()

print(mean, std)

data, result = live(mean, std)

for D, r in zip(data, result):
    print(D)
    print(r)
    print('\n')
Example #4
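A fragment of a discord.py bot: a notification when a member leaves the server (the enclosing event handler is assumed below, since the fragment starts mid-function), a looping task that renames a channel every 300 seconds to show the current member count, and clear/test commands; live() is called once before client.run(token).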
@client.event
async def on_member_remove(member):
    # assumed event handler (the original def line is cut off): announce a member leaving
    channel = client.get_channel(840211266869919784)
    await channel.send(f"{member} has left the server.")


@tasks.loop(seconds=300)
async def count():
    temp = client.get_channel(839976544692731985)

    channel = client.get_channel(840442030785691678)

    await channel.edit(name="Server Users: " + str(temp.guild.member_count))


@client.command()
async def clear(ctx, amount=sys.maxsize - 1):
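    # when an explicit amount is given, +1 also removes the invoking command message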
    if (amount != sys.maxsize - 1):
        await ctx.channel.purge(limit=amount + 1)
    else:
        await ctx.channel.purge(limit=amount)


@client.command()
async def test(ctx):
    await ctx.send(ctx.guild.member_count)


# End of Bot

live()
client.run(token)
Example #5
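The same DQNAgent configuration as in Example #1, here applied to a Forex environment: live() runs 500 episodes of up to 3600 timesteps, and the rewards and trained agent are saved under a seed-specific file name.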
        agent = DQNAgent(  # assumed constructor call, matching Example #1
            action_set=[0, 1, 2],
            # reward_function=functools.partial(cartpole_reward_function, reward_type='sparse'),
            reward_function=functools.partial(Forex_reward_function),
            feature_extractor=ForexIdentityFeature(),
            hidden_dims=[50, 50],
            learning_rate=5e-4,
            buffer_size=5000,
            # batch_size=16,
            batch_size=8,
            num_batches=100,
            starts_learning=100,
            final_epsilon=0.02,
            discount=0.99,
            target_freq=10,
            verbose=False,
            print_every=10)

        _, _, rewards = live(
            agent=agent,
            environment=env,
            # num_episodes=100,
            # max_timesteps=3601,
            num_episodes=500,
            max_timesteps=3600,
            verbose=True,
            print_every=50)

        file_name = '|'.join(['dqn', str(seed)])
        np.save(os.path.join(reward_path, file_name), rewards)
        agent.save(path=os.path.join(agent_path, file_name + '.pt'))