Code example #1
    def test_pod_usage(self):
        """
        Check if pod usage is calculated correctly.
        """
        self.mock_list_nodes.return_value = self.nodes_list
        self.mocked_binding.return_value = None
        self.mocked_call_api.side_effect = self.call_api_side_effect
        self.mocked_all_pods.return_value = self.pods_list
        sched = Scheduler()

        self.assertEqual(
            sched.podUsage(self.pods_list.items[0].metadata.name, self.pods_list.items[0].metadata.namespace)['cpu'],
            '1000000n')
        self.assertEqual(
            sched.podUsage(self.pods_list.items[1].metadata.name, self.pods_list.items[1].metadata.namespace)['memory'],
            '9000Ki')
        self.assertEqual(self.pods_list.items[2].metadata.name, 'test_pod_3')
        self.assertEqual(
            sched.podUsage(self.pods_list.items[2].metadata.name, self.pods_list.items[2].metadata.namespace)['memory'],
            '0Ki')
        return
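The mocks referenced above (self.mock_list_nodes, self.mocked_binding, self.mocked_call_api, self.mocked_all_pods) are assumed to come from the test fixture. A minimal setUp sketch with unittest.mock; the patch targets are hypothetical placeholders, since the real module layout is not shown:

import unittest
from unittest import mock

class SchedulerUsageTest(unittest.TestCase):
    def setUp(self):
        # Hypothetical patch targets: point these at wherever the real
        # Scheduler looks up nodes, pods, bindings and the metrics API.
        targets = {
            'mock_list_nodes': 'scheduler.Scheduler.list_nodes',
            'mocked_binding': 'scheduler.Scheduler.bind',
            'mocked_call_api': 'scheduler.Scheduler.call_api',
            'mocked_all_pods': 'scheduler.Scheduler.all_pods',
        }
        for attr, target in targets.items():
            patcher = mock.patch(target)
            setattr(self, attr, patcher.start())
            self.addCleanup(patcher.stop)  # undo each patch after the test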
Code example #2
    def __initialize_all(self, _round):
        id_manager.reset_all()
        self.__cp = ControlPlane(self.__cp_config)
        self.__dp = DataPlane(self.__dp_config)
        self.__scheduler = Scheduler(self.__id, self.__cp, self.__dp)
        self.__mm = MetricManager(self.__id, self.__win_size, self.__acc,
                                  self.__metrics, _round)
        _switches = self.__dp.switches

        assert len(_switches) == len(self.__assoc)

        for _id, _controller_id_set in enumerate(self.__assoc):
            _assoc_controllers = self.__cp.get_controllers_by_ids(
                _controller_id_set)
            _rates = self.__assoc_costs["RATES"][_id]
            _assoc_costs = {
                _cid: PROCS[self.__assoc_costs["PROC"]](_rates[_idx], None)
                for _idx, _cid in enumerate(_controller_id_set)
            }
            _switches[_id].add_assoc_controllers(_assoc_controllers,
                                                 _assoc_costs)
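PROCS is indexed by the config's "PROC" key and called with a per-controller rate, so it is presumably a registry of cost factories. A hypothetical shape, purely for illustration:

# Hypothetical registry shape matching the call PROCS[name](rate, None) above;
# the entries here are illustrative, not the project's actual cost models.
PROCS = {
    "CONSTANT": lambda rate, _unused: rate,
    "SCALED": lambda rate, _unused: 1.5 * rate,
}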
Code example #3
async def main():
    print_banner('Adhan Scheduler')
    args = parse_args()

    api = PrayerTimes()  # get prayer times for current date and location

    abs_path = Path(__file__).resolve()

    # set cronjob for all prayers
    scheduler = Scheduler(
        times=await api.get_times(),
        command=f'python {abs_path.parent}/play_adhan.py {args.speaker} --volume {args.volume}'
    )

    # Schedule this script to rerun every day at midnight
    scheduler.schedule_job(
        name="Adhan Scheduler",
        time="00:00",
        command=f"python {abs_path} {args.speaker} --volume {int(args.volume)}'"
    )
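The Scheduler here wraps cron job creation. A sketch of how a schedule_job like the one above could be built on the python-crontab package (this helper is an assumption, not the project's code):

from crontab import CronTab  # pip install python-crontab

def schedule_job(name: str, time: str, command: str) -> None:
    """Create or replace a cron job that runs `command` daily at HH:MM."""
    hour, minute = time.split(':')
    cron = CronTab(user=True)      # current user's crontab
    cron.remove_all(comment=name)  # drop any stale job with the same name
    job = cron.new(command=command, comment=name)
    job.setall(f'{int(minute)} {int(hour)} * * *')  # minute hour dom mon dow
    cron.write()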
Code example #4
File: simulator.py Project: Sasasu/Cluster-Simulator
    def __init__(self, cluster, json_dir, user_number):
        self.cluster = cluster
        self.log = Log()
        self.json_dir = json_dir
        self.scheduler = Scheduler(cluster)
        self.block_list = list()
        self.job_list = list()  # list of lists. A job list for each user.
        self.event_queue = queue.PriorityQueue()
        self.timestamp = 0
        self.user_number = user_number
        self.total_application_type = 1
        self.app_map = OrderedDict()  # map from user id to app id
        self.job_durations = {}
        self.stage_durations = {}
        self.job_execution_profile = {
        }  # record the execution information of jobs
        # generate the job list for each user. All users share the rdd_list and block list
        for user_index in range(0, user_number):
            # each user randomly chooses an application
            # application_number = random.randint(1, self.total_application_type)
            application_number = user_index + 1
            self.app_map[user_index] = application_number
            # stage_profile_path = 'Workloads/stage_profile.json' % (json_dir, application_name)
            stage_profile_path = "Workloads/stage_profile.json"  # read stage_profile XX
            self.stage_profile = json.load(open(stage_profile_path, 'r'),
                                           object_pairs_hook=OrderedDict)
            print("stage_profile loaded")

            runtime_path = "Workloads/runtime.json"  # read runtime fille XX
            self.runtime_profile = json.load(open(runtime_path, 'r'),
                                             object_pairs_hook=OrderedDict)
            print("runtime_profile loaded")
            # self.generate_rdd_profile(user_index)

            job_path = "Workloads/job.json"  # read job file XX
            self.job_profile = json.load(open(job_path, 'r'),
                                         object_pairs_hook=OrderedDict)
            print("job_profile loaded")
            self.generate_job_profile(user_index)
Code example #5
File: controller.py Project: maw41/FRIBs_Voting
 def __init__(self, config):
     self.client_queue = Queue()
     self.votes = {}
     self.scheduler = Scheduler(config.ip, config.scheduler_port, config.id)
     self.remote_servers = []
     for rs in config.remote_servers:
         if rs.ip is None:
             remote_server = RemoteServer()
         else:
             remote_server = RemoteServer(rs.ip, rs.seed, rs.ports[0],
                                          rs.ports[1], rs.scheduler_port)
         remote_server.loadLut(rs.olut)
         self.remote_servers.append(remote_server)
     self.local_server = LocalServer(self.remote_servers[0],
                                     self.remote_servers[1],
                                     self.remote_servers[2])
     self.local_server.loadResultLut(config.result_lut)
     self.local_server.initiateTally(config.init_tally, 24)
     self.client_listener = Client(config.client_ip, config.client_port,
                                   self.client_queue)
     signal.signal(signal.SIGINT, self.stop)
     signal.signal(signal.SIGTERM, self.stop)
Code example #6
    def test_update_nodes(self):
        """
        Test the updateNodes and getNodeUsage methods.
        At this stage the scheduler has all static and dynamic (usage)
        data for all nodes.
        """

        self.mock_list_nodes.return_value = self.nodes_list
        self.mocked_binding.return_value = None
        self.mocked_call_api.side_effect = self.call_api_side_effect

        sched = Scheduler()
        sched.update_nodes()

        self.assertNotEqual(sched.all_nodes, [])
        self.assertEqual(sched.all_nodes[0].metadata.name, 'control-plane')
        self.assertEqual(sched.all_nodes[1].metadata.name, 'worker-node')
        self.assertEqual(sched.all_nodes[0].pods.items, [])
        self.assertEqual(sched.all_nodes[0].usage, {"cpu": "200000000n", "memory": "2000000Ki"})
        self.assertEqual(sched.all_nodes[1].usage, {"cpu": "300000000n", "memory": "3000000Ki"})

        return
Code example #7
def dschedule(interval_hours, dry_run=False):
    '''
        This function schedules processing SC API devices based
        on the result of a global query for data processing
        in the SC API
    '''
    try:
        df = ScApiDevice.search_by_query(key="postprocessing_id",
                                         value="not_null",
                                         full=True)
    except Exception:
        # Nothing to schedule if the global query fails
        return None

    # Check devices to postprocess first
    dl = []

    for device in df.index:
        std_out(f'[CHUPIFLOW] Checking postprocessing for {device}')
        scd = Device(descriptor={'source': 'api', 'id': device})
        # Avoid scheduling invalid devices
        if scd.validate(): dl.append(device)
        else: std_out(f'[CHUPIFLOW] Device {device} not valid', 'ERROR')

    for d in dl:
        # Set scheduler
        s = Scheduler()
        # Define task
        task = f'{config._device_processor}.py --device {d}'
        # Create log output if it does not exist
        dt = join(config.paths['tasks'], str(d))
        makedirs(dt, exist_ok=True)
        log = f"{join(dt, f'{config._device_processor}_{d}.log')}"
        # Schedule task
        s.schedule_task(task=task,
                        log=log,
                        interval=f'{interval_hours}H',
                        dry_run=dry_run,
                        load_balancing=True)
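schedule_task receives the interval as a string such as '12H'. A hypothetical helper showing how such a spec might be converted to seconds for the underlying timer:

def parse_interval(spec: str) -> int:
    """Convert '30M', '12H' or '1D' style specs to seconds (hypothetical helper)."""
    units = {'M': 60, 'H': 3600, 'D': 86400}
    return int(spec[:-1]) * units[spec[-1].upper()]

assert parse_interval('12H') == 12 * 3600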
Code example #8
File: simulator.py Project: xiandong79/FSC_code
    def __init__(self, cluster, preference_value, json_dir, user_number, flag="initial"):
        self.flag = flag
        self.cluster = cluster
        self.preference_value = preference_value
        self.json_dir = json_dir
        self.scheduler = Scheduler(cluster)
        self.block_list = list()
        self.job_list = list()  # list of lists. A job list for each user.
        self.event_queue = Q.PriorityQueue()
        self.timestamp = 0
        self.user_number = user_number
        self.job_durations = {}
        self.stage_durations = {}
        self.job_execution_profile = {}  # record the execution information of jobs
        self.time_out = 3  # added for delay scheduling
        self.threshold = 0.8
        self.threshold_step = 0.2

        # added by xiandong
        for user_index in range(0, user_number):
            self.job_execution_profile[user_index] = {}

        for user_index in range(0, user_number):
            """currently, we load the 'job info (job, stage, runtime)' for each user separately.
            which is equivalent to each user has 'exact same' job submition now!!! by xiandong
            """
            stage_profile_path = "Workloads/stage_profile.json"
            self.stage_profile = json.load(
                open(stage_profile_path, 'r'), object_pairs_hook=OrderedDict)

            runtime_path = "Workloads/runtime.json"
            self.runtime_profile = json.load(
                open(runtime_path, 'r'), object_pairs_hook=OrderedDict)

            job_path = "Workloads/job.json"
            self.job_profile = json.load(
                open(job_path, 'r'), object_pairs_hook=OrderedDict)
            self.generate_job_profile(user_index)
Code example #9
def main():
    setup_logging()

    credentials = get_credentials()
    if credentials["username"] == "" or credentials["password"] == "":
        logger = logging.getLogger("eo")
        logger.error(
            "The username or password are blank. See code for how to set them. Exiting."
        )
        exit()

    eo = ElectricObject(username=credentials["username"],
                        password=credentials["password"])

    if len(sys.argv) > 1 and sys.argv[1] == "--once":
        show_a_new_favorite(eo)
        exit()

    scheduler = Scheduler(SCHEDULE,
                          lambda: show_a_new_favorite(eo),
                          schedule_jitter=SCHEDULE_JITTER)
    scheduler.run()
Code example #10
File: controller.py Project: Springuin/hydroctrl
 def __init__(self, config, ph_config, pump_x_config, pump_y_config,
              solution_tank_config, supply_tank_config):
     self.database = None
     self.thingspeak = None
     self.ph = PHInterface(ph_config)
     self.pump_x = PumpInterface(pump_x_config)
     self.pump_y = PumpInterface(pump_y_config)
     self.solution_tank = SolutionTankInterface(solution_tank_config)
     self.supply_tank = WaterTankInterface(supply_tank_config)
     self.scheduler = Scheduler(config['iteration_period'],
                                self._do_iteration_throw_only_fatal)
     self.valid_ph_range = config['valid_ph_range']
     self.valid_temperature_range = config['valid_temperature_range']
     self.valid_supply_tank_volume_range = config[
         'valid_supply_tank_volume_range']
     self.nutrients_concentration_per_ph = config[
         'nutrients_concentration_per_ph']
     self.min_pumped_nutrients = config['min_pumped_nutrients']
     self.desired_ph = config['desired_ph']
     self.solution_volume = config['solution_volume']
     self.proportional_k = config['proportional_k']
     self.solution_tank_is_full = True
Code example #11
File: world.py Project: THamza/randomPrograms
def setup():
    global time
    global scheduler
    global predictionModel

    time = Time(startingYear, startingMonth, startingDay, startingHour)
    predictionModel = keras.models.load_model(
        'C:\\Users\\touhs\\OneDrive\\Desktop\\thesises\\lstm_tso.pb')
    pred = predictionModel.predict([
        0.489865, 0.000000, 0.345712, 0.259122, 0.363029, 0.023878, 0.5345,
        0.173314, 0.853590, 0.575472, 0.773109, 0.005352, 0.803922, 0.209394,
        1.0, 1.0, 1.0, 1.000000, 0.0, 0.381369, 0.897196, 1.00, 0.333333,
        0.861111, 0.0, 0.0, 0.775, 0.191802, 0.901235, 0.921348, 0.133333,
        0.277778, 0.0, 0.0, 0.794872, 0.223952, 0.900901, 0.74, 0.055556,
        1.000000, 0.0, 0.0, 0.794872, 0.290011, 0.693548, 0.677419, 0.200000,
        0.138889, 0.0, 0.0, 0.794872, 0.243608, 0.821918, 0.728261, 0.046512,
        0.833333, 0.0, 0.0, 0.775
    ])
    print("Prediction:", pred)
    scheduler = Scheduler()
    for i in range(len(deviceLabels)):
        devices.append(Device(deviceLabels[i], deviceAverageUseTime[i], time))
Code example #12
File: votekick.py Project: mrpizza123/pysnip
    def __init__(self, instigator, victim, reason):
        self.protocol = protocol = instigator.protocol
        self.instigator = instigator
        self.victim = victim
        self.reason = reason
        self.votes = {instigator: True}
        self.ended = False

        protocol.irc_say(
            S_ANNOUNCE_IRC.format(instigator=instigator.name,
                                  victim=victim.name,
                                  reason=self.reason))
        protocol.send_chat(S_ANNOUNCE.format(instigator=instigator.name,
                                             victim=victim.name),
                           sender=instigator)
        protocol.send_chat(S_REASON.format(reason=self.reason),
                           sender=instigator)
        instigator.send_chat(S_ANNOUNCE_SELF.format(victim=victim.name))

        schedule = Scheduler(protocol)
        schedule.call_later(self.duration, self.end, S_RESULT_TIMED_OUT)
        schedule.loop_call(30.0, self.send_chat_update)
        self.schedule = schedule
Code example #13
def read_sensors() -> None:
    cpu_mon = CPUTemp(f"/sys/class/thermal/thermal_zone0/temp")
    airq = AirQ(1)
    temphum = TempHum(2)
    db_handler = DatabaseHandler("test01")

    def db_write_helper() -> None:
        cpu_reading = cpu_mon.get_reading()
        db_handler.write_to_db("CPU-Temp", cpu_reading)
        #print(f"Wrote CPU Temperature: {cpu_reading[1]} at {cpu_reading[0]}")
        aq_reading = airq.get_reading()
        db_handler.write_to_db("AirQ", aq_reading)
        #print(f"Wrote Air Quality: {aq_reading[1]} at {aq_reading[0]}")
        rtemp_hum = temphum.get_reading("temperature")
        rtemp = (rtemp_hum[0], rtemp_hum[1][0])
        db_handler.write_to_db("Room_Temp", rtemp)
        #print(f"Wrote Room Temperature: {rtemp[1]} at {rtemp[0]}")
        rhum = (rtemp_hum[0], rtemp_hum[1][1])
        db_handler.write_to_db("Room_Hum", rhum)
        #print(f"Wrote Room Humidity: {rhum[1]} at {rhum[0]}")

    schd = Scheduler(300, db_write_helper)
    schd.start()
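Scheduler(300, db_write_helper) suggests a fixed-interval runner that calls the helper every five minutes. One plausible implementation as a sketch (not the project's actual class), using a daemon thread:

import threading

class Scheduler:
    """Call `job` every `interval` seconds on a background thread (sketch)."""

    def __init__(self, interval: float, job) -> None:
        self.interval = interval
        self.job = job
        self._stop = threading.Event()

    def start(self) -> None:
        def loop():
            # Event.wait returns False on timeout, so this ticks until stop()
            while not self._stop.wait(self.interval):
                self.job()
        threading.Thread(target=loop, daemon=True).start()

    def stop(self) -> None:
        self._stop.set()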
Code example #14
def initialize():
    #create chain and api obj
    chain = MarkovTextGenerator2Node(words_store)
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth,
                     retry_count=10,
                     retry_delay=5,
                     retry_errors=set([503]))
    account_name = api.me().screen_name

    #setup id file and event scheduler
    latestidfile = open(idfile, 'r+')
    events = Scheduler()

    #setup logger
    # basicConfig is a no-op after the first call, so set everything at once
    logging.basicConfig(filename=logfilename, level=logging.INFO,
                        format='%(asctime)s %(message)s')
    logging.info("Starting up!")

    #function to run when terminating program
    signal.signal(signal.SIGTERM, terminate)
    return chain, api, latestidfile, events
Code example #15
def dispatch():
    db_session = DBSession
    system_option = SystemOption.get(db_session)

    inventory_manager = InventoryManager(system_option.inventory_threads,
                                         'Inventory-Manager')
    inventory_manager.start()

    software_manager = SoftwareManager(system_option.install_threads,
                                       'Software-Manager')
    software_manager.start()

    download_manager = DownloadManager(system_option.download_threads,
                                       'Download-Manager')
    download_manager.start()

    generic_job_manager = GenericJobManager(2, 'Generic-Job')
    generic_job_manager.start()

    scheduler = Scheduler('Scheduler')
    scheduler.start()

    print('csmdispatcher started')
Code example #16
    def objective_function(params):
        main_path = r'Scenarios\ParameterOpt'
        scenario = r'1800-0.2-60-1'
        bestScores = []
        solutions = []
        hyperParamSets = []
        for i in range(numRuns):
            scheduler = Scheduler(
                layoutFile=rf'{main_path}\{scenario}\DistanceMatrix\dm.csv',
                agvFile=rf'{main_path}\{scenario}\AGV\agvs.xlsx',
                requestFile=rf'{main_path}\{scenario}\TRs\trs.xlsx',
                stationFile=rf'{main_path}\{scenario}\Station\stations.xlsx',
                hyperparams=params)

            solution = scheduler.solve(15)
            solutions.append(solution)
            hyperParamSets.append(params)
            bestScores.append(solution.get('bestScores')[-1][1])
        bestScore = statistics.mean(bestScores)
        return {
            'loss': bestScore,
            'status': STATUS_OK,
            'solution': solutions,
            'paramSet': hyperParamSets,
            'bestScores': bestScores
        }
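The return dict with STATUS_OK follows hyperopt's objective contract. A hypothetical driver showing how objective_function would typically be handed to fmin; the search space below is illustrative, not the project's:

from hyperopt import fmin, tpe, hp, Trials

space = {  # illustrative hyperparameters, not the project's actual ones
    'tabuTenure': hp.quniform('tabuTenure', 5, 50, 1),
    'coolingRate': hp.uniform('coolingRate', 0.80, 0.99),
}
trials = Trials()
best = fmin(fn=objective_function, space=space, algo=tpe.suggest,
            max_evals=25, trials=trials)
print('Best hyperparameters:', best)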
Code example #17
def _main():
    # pylint: disable=import-outside-toplevel
    args = _arguments()

    # start runner
    if args.interface == 'direct':
        from runner_direct import RunnerDirect
        runner = RunnerDirect(args.gams)
    elif args.interface == 'pyomo':
        from runner_pyomo import RunnerPyomo
        runner = RunnerPyomo()
    elif args.interface == 'jump':
        from runner_jump import RunnerJump
        # use_pyjulia=True would only be viable with a single thread; it is
        # currently disabled in both cases.
        # runner = RunnerJump(args.gams, use_pyjulia=True)
        runner = RunnerJump(args.gams, use_pyjulia=False)

    # select model files
    if args.testset == 'minlplib':
        model_path = os.path.join('testsets', 'minlplib', runner.modelfile_ext)
        solu_file = os.path.join('testsets', 'minlplib', 'minlplib.solu')
    elif args.testset == 'princetonlib':
        model_path = os.path.join('testsets', 'princetonlib',
                                  runner.modelfile_ext)
        solu_file = None
    elif args.testset == 'other':
        model_path = args.modelpath
        solu_file = None

    # run benchmark
    scheduler = Scheduler(runner, args.result, args.gamsopt,
                          Output(args.output))
    scheduler.create(model_path, args.max_jobs, args.max_time, args.kill_time,
                     solu_file)
    scheduler.run(args.threads, args.max_total_time)
Code example #18
    def __init__(self):
        super(PiperBot, self).__init__(daemon=True)

        self.servers = MutableNameSpace({})

        self.admins = defaultdict(list)

        self.command_char = "#"
        self.in_queue = PriorityQueue()

        self.apikeys = {}

        self.commands = {}
        self.aliases = {}
        self.plugins = {}

        self.pre_dispatch_exts = []
        self.pre_command_exts = []
        self.post_command_exts = []
        self.pre_event_exts = []
        self.post_event_exts = []
        self.pre_trigger_exts = []
        self.post_trigger_exts = []
        self.pre_regex_exts = []
        self.post_regex_exts = []

        self.worker_pool = ThreadPool(processes=4)
        self.scheduler = Scheduler()
        self.scheduler.start()

        self.message_buffer = defaultdict(
            lambda: defaultdict(lambda: deque(maxlen=50)))
        self.buffer_pattern = re.compile(r"(?:(\w+)|(\s)|^)(?:\^(\d+)|(\^+))")
        self.buffer_pattern_escape = re.compile(r"\\\^")

        self.running = False
Code example #19
    def __init__(self,
                 num_cpus,
                 pid=None,
                 protocol='ipc',
                 address='turkey-server',
                 port=None):
        self.protocol = protocol
        self.address = address
        self.port = '' if port is None else str(port)
        self.pid = os.getpid() if pid is None else pid
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.ROUTER)

        self.url = '%(protocol)s://%(address)s%(sep)s%(port)s' % {
            'protocol': self.protocol,
            'address': self.address,
            'sep': '' if port is None else ':',
            'port': self.port
        }

        self.tasks = {}
        self.resources = num_cpus

        self.scheduler = Scheduler()
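The __init__ above builds the ROUTER socket and URL but never binds it. A sketch of how the server side could proceed with pyzmq (the serve method, its frame layout and the scheduler.handle dispatch are assumptions, not the project's code):

    def serve(self):
        self.socket.bind(self.url)  # e.g. 'ipc://turkey-server'
        while True:
            # For REQ clients, ROUTER receives [identity, empty delimiter, payload]
            identity, empty, payload = self.socket.recv_multipart()
            reply = self.scheduler.handle(payload)  # hypothetical dispatch
            self.socket.send_multipart([identity, empty, reply])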
Code example #20
    def init_individual(self, ind_class, size):
        temp_jobs_list = copy.deepcopy(self.__jobs)
        temp_machines_list = copy.deepcopy(self.__machines)

        # Run the scheduler
        s = Scheduler(temp_machines_list, 1, temp_jobs_list)
        s.run(Heuristics.random_operation_choice, verbose=False)

        # Retrieving all the activities and the operations done
        list_activities = []
        for temp_job in temp_jobs_list:
            for temp_activity in temp_job.activities_done:
                activity = self.__jobs[temp_activity.id_job - 1].get_activity(
                    temp_activity.id_activity)
                operation = activity.get_operation(
                    temp_activity.operation_done.id_operation)
                list_activities.append(
                    (temp_activity.operation_done.time, activity, operation))
        # Ordering activities by time
        list_activities = sorted(list_activities, key=lambda x: x[0])
        individual = [(activity, operation)
                      for (_, activity, operation) in list_activities]
        del temp_jobs_list, temp_machines_list
        return ind_class(individual)
Code example #21
 def setUp(self):
     self.scheduler = Scheduler()
     self.scheduler.select_dataset_file("TestCase/t5.txt")
Code example #22
def main():
    # Restart the scheduler whenever it crashes; a loop avoids unbounded recursion
    while True:
        try:
            s = Scheduler()
            s.run()
            break  # run() returned normally, so stop retrying
        except Exception:
            continue
Code example #23
def main():
    print('Program starting...')
    s = Scheduler()
    s.run()
Code example #24
File: config.py Project: minsuklee/nds-ide
from power import Power

###############################################################################
# DISPLAY
##########
# Set pygame to True or False depending on whether you want to use pygame (True by default)
# Set screen to change the screen dimensions ([800, 600] by default)
###############################################################################
dispatch_display = Display(pygame=True, screen=[400, 560])

###############################################################################
# SCHEDULER
##########
# Modify speedCoeff only
###############################################################################
scheduler = Scheduler(speedCoeff=1)

###############################################################################
# REGISTERS
##########
# Add registers like this: reg = Register("NAME")
###############################################################################

###############################################################################
# CAN NETWORK
##########
# Add CAN networks if needed
###############################################################################
network_master = Ether()
network_slave = Ether()
Code example #25
	def run(self):
		self.s = Scheduler(self.cfg["name"], self.cfg["interval"], self.cfg["callback"], self.cfg)
		self.s.run()
Code example #26
import config
from mongodb import MongoDB

from connexion.exceptions import OAuthProblem

# Create the application instance
app = connexion.App(__name__, specification_dir='./')

# Read the swagger.yml file to configure the endpoints
app.add_api('swagger.yaml')

mDB = MongoDB()


# Create a URL route in our application for "/"
@app.route('/')
def home():
    """
    This function just responds to the browser URL
    localhost:5000/
    :return: the string "Hello World"
    """
    return "Hello World"


x = Scheduler()
x.startEngine()
x.addDTAPI()
# If we're running in stand-alone mode, run the application
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=config.PORT, debug=True)
Code example #27
File: main.py Project: yosh778/OG-Bot
logger = logging.getLogger('OGBot')
logger.setLevel(config.log_level)
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

logger.info('Starting the bot')

auth_client = authentication.AuthenticationProvider(config)
browser = auth_client.get_browser()

bot = None
if config.scheduler:
    scheduler = Scheduler(browser, config)
else:
    bot = OgameBot(browser, config)

switcher = {
    'overview': bot.overview,
    'log_fleet_movement': bot.log_fleet_movement,
    'explore': bot.explore,
    'attack_inactive_planets': bot.attack_inactive_planets,
    'auto_build_defenses': bot.auto_build_defenses,
    'auto_build_defenses_to_planet': bot.auto_build_defenses_to_planet,
    'transport_resources_to_planet': bot.transport_resources_to_planet,
    'transport_resources_to_least_developed_planet':
    bot.transport_resources_to_least_developed_planet,
    'transport_resources_to_least_defended_planet':
    bot.transport_resources_to_least_defended_planet,
Code example #28
File: controller.py Project: seasider1960/pi_alarm
    navbar = get_navbar_template()
    active_tab = kwargs.get('active_tab', None)

    if active_tab == 0 or active_tab:
        navbar[active_tab]['active'] = True

    kwargs['navbar'] = navbar
    return mytemplate.render(get_flashed_messages=get_flashed_messages,
                             **kwargs)


# -- Global Variables
current_day = datetime.date.today() + datetime.timedelta(days=1)

light_driver = LightDriver()
scheduler = Scheduler()


# -- Routes
@app.route('/')
@app.route('/index')
@app.route('/home')
def index():
    set_current_day(datetime.date.today() + datetime.timedelta(days=1))
    return redirect(('/' + get_current_day_name()).lower())


@app.route('/set_alarm', methods=['POST'])
def set_alarm():
    alarm_time = request.form.get('time')
    weekday = get_current_day().isoweekday()
Code example #29
File: full_sim.py Project: gitmjreilly/albert_sim
def construct_computer_system():
    global the_cpu
    global address_space
    # global ram
    global the_ram
    global console_serial_port
    global interrupt_controller
    global scheduler
    global counter_0
    # global serial_1
    # global serial_2

    console_serial_port = FifoSerialPort(
        listen_port=5000,
        input_delay=1200,
        # output_delay = 1200,
        output_delay=1,
        name="Console")

    the_cpu = CPU()

    address_space = AddressSpace()
    the_ram = RAM()

    interrupt_controller = Interrupt_Controller()

    counter_0 = Mem_Counter()

    #    serial_1 = FifoSerialPort(
    #        listen_port = 5600,
    #        input_delay = 1200,
    #        output_delay = 1200,
    #        name = "Disk Controller")
    #
    #    serial_2 = FifoSerialPort(
    #        listen_port = 6000,
    #        input_delay = 1200,
    #        output_delay = 1200,
    #        name = "Terminal Controller")
    #

    # Please note address spaces can overlap. They are searched in FIFO order
    address_space.add_device(0xF000, 0xF00F, console_serial_port)
    address_space.add_device(0xF010, 0xF01F, interrupt_controller)
    # address_space.add_device(0xF090, 0xF09F, serial_1)
    # address_space.add_device(0xF030, 0xF03F, serial_2)
    address_space.add_device(0xF060, 0xF06F, counter_0)

    address_space.add_device(0x1F000, 0x1F00F, console_serial_port)
    address_space.add_device(0x2F000, 0x2F00F, console_serial_port)
    address_space.add_device(0x3F000, 0x3F00F, console_serial_port)
    address_space.add_device(0x4F000, 0x4F00F, console_serial_port)

    # Make sure to keep RAM at end of address space because
    # address space is searched (for devices) in insertion order
    address_space.add_device(0, 1024 * 1024 * 8, the_ram)

    the_cpu.set_memory_methods(address_space.read, address_space.write,
                               address_space.code_read,
                               address_space.write_type)

    # "Connect" the counter's "Zero" output to interrupt source 1 as is done in the VHDL
    interrupt_controller.register_interrupt_source_function(
        counter_0.get_counter_is_zero, 1)

    # Connect the disk uart "rx half full" line to interrupt source 4 as in VHDL
    #    interrupt_controller.register_interrupt_source_function(
    #        serial_1.get_rx_half_full,
    #        4)
    #
    #    # Connect the ptc uart "rx quarter full" line to interrupt source 5
    #    interrupt_controller.register_interrupt_source_function(
    #        serial_2.get_rx_quarter_full,
    #        5)
    #
    #    # interrupt_controller.register_interrupt_source_function(
    #        # serial_3.get_input_data_available,
    #        # 2)
    #

    scheduler = Scheduler()

    #
    # The serial port needs to schedule future events.  The scheduling
    # function is scheduler.add_event() - signature below
    #     add_event(self, event_method, scheduled_time, name_of_event = "")
    console_serial_port.register_scheduler_function(scheduler.add_event)

    #
    # The "high speed" fifo serial port needs to schedule future events.  The scheduling
    # function is scheduler.add_event() - signature below
    #     add_event(self, event_method, scheduled_time, name_of_event = "")
    # serial_1.register_scheduler_function(scheduler.add_event)

    # serial_2.register_scheduler_function(scheduler.add_event)

    # serial_3.register_scheduler_function(scheduler.add_event)

    #
    # The timer/counter needs to schedule future events (i.e. the ticks)
    counter_0.register_scheduler_function(scheduler.add_event)

    reset_computer()
Code example #30
def main():
    try:
        os.mkdir(args.snapshot_directory)
    except FileExistsError:  # snapshot directory already exists
        pass

    np.random.seed(0)

    xp = np
    device_gpu = args.gpu_device
    device_cpu = -1
    using_gpu = device_gpu >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    dataset = gqn.data.Dataset(args.dataset_directory)

    hyperparams = HyperParameters()
    hyperparams.generator_share_core = args.generator_share_core
    hyperparams.generator_share_prior = args.generator_share_prior
    hyperparams.generator_generation_steps = args.generation_steps
    hyperparams.generator_share_upsampler = args.generator_share_upsampler
    hyperparams.inference_share_core = args.inference_share_core
    hyperparams.inference_share_posterior = args.inference_share_posterior
    hyperparams.h_channels = args.h_channels
    hyperparams.z_channels = args.z_channels
    hyperparams.u_channels = args.u_channels
    hyperparams.image_size = (args.image_size, args.image_size)
    hyperparams.representation_channels = args.representation_channels
    hyperparams.representation_architecture = args.representation_architecture
    hyperparams.pixel_n = args.pixel_n
    hyperparams.pixel_sigma_i = args.initial_pixel_variance
    hyperparams.pixel_sigma_f = args.final_pixel_variance
    hyperparams.save(args.snapshot_directory)
    print(hyperparams)

    model = Model(hyperparams,
                  snapshot_directory=args.snapshot_directory,
                  optimized=args.optimized)
    if using_gpu:
        model.to_gpu()

    scheduler = Scheduler(sigma_start=args.initial_pixel_variance,
                          sigma_end=args.final_pixel_variance,
                          final_num_updates=args.pixel_n,
                          snapshot_directory=args.snapshot_directory)
    print(scheduler)

    optimizer = AdamOptimizer(model.parameters,
                              mu_i=args.initial_lr,
                              mu_f=args.final_lr,
                              initial_training_step=scheduler.num_updates)
    print(optimizer)

    pixel_var = xp.full((args.batch_size, 3) + hyperparams.image_size,
                        scheduler.pixel_variance**2,
                        dtype="float32")
    pixel_ln_var = xp.full((args.batch_size, 3) + hyperparams.image_size,
                           math.log(scheduler.pixel_variance**2),
                           dtype="float32")

    representation_shape = (args.batch_size,
                            hyperparams.representation_channels,
                            args.image_size // 4, args.image_size // 4)

    fig = plt.figure(figsize=(9, 3))
    axis_data = fig.add_subplot(1, 3, 1)
    axis_data.set_title("Data")
    axis_data.axis("off")
    axis_reconstruction = fig.add_subplot(1, 3, 2)
    axis_reconstruction.set_title("Reconstruction")
    axis_reconstruction.axis("off")
    axis_generation = fig.add_subplot(1, 3, 3)
    axis_generation.set_title("Generation")
    axis_generation.axis("off")

    current_training_step = 0
    for iteration in range(args.training_iterations):
        mean_kld = 0
        mean_nll = 0
        mean_mse = 0
        mean_elbo = 0
        total_num_batch = 0
        start_time = time.time()

        for subset_index, subset in enumerate(dataset):
            iterator = gqn.data.Iterator(subset, batch_size=args.batch_size)

            for batch_index, data_indices in enumerate(iterator):
                # shape: (batch, views, height, width, channels)
                # range: [-1, 1]
                images, viewpoints = subset[data_indices]

                # (batch, views, height, width, channels) -> (batch, views, channels, height, width)
                images = images.transpose((0, 1, 4, 2, 3)).astype(np.float32)

                total_views = images.shape[1]

                # Sample number of views
                num_views = random.choice(range(1, total_views + 1))
                observation_view_indices = list(range(total_views))
                random.shuffle(observation_view_indices)
                observation_view_indices = observation_view_indices[:num_views]

                if num_views > 0:
                    observation_images = preprocess_images(
                        images[:, observation_view_indices])
                    observation_query = viewpoints[:, observation_view_indices]
                    representation = model.compute_observation_representation(
                        observation_images, observation_query)
                else:
                    representation = xp.zeros(representation_shape,
                                              dtype="float32")
                    representation = chainer.Variable(representation)

                # Sample query
                query_index = random.choice(range(total_views))
                query_images = preprocess_images(images[:, query_index])
                query_viewpoints = viewpoints[:, query_index]

                # Transfer to gpu if necessary
                query_images = to_device(query_images, device_gpu)
                query_viewpoints = to_device(query_viewpoints, device_gpu)

                z_t_param_array, mean_x = model.sample_z_and_x_params_from_posterior(
                    query_images, query_viewpoints, representation)

                # Compute loss
                ## KL Divergence
                loss_kld = 0
                for params in z_t_param_array:
                    mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p = params
                    kld = gqn.functions.gaussian_kl_divergence(
                        mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p)
                    loss_kld += cf.sum(kld)

                ## Negative log-likelihood of generated image
                loss_nll = cf.sum(
                    gqn.functions.gaussian_negative_log_likelihood(
                        query_images, mean_x, pixel_var, pixel_ln_var))

                # Calculate the average loss value
                loss_nll = loss_nll / args.batch_size
                loss_kld = loss_kld / args.batch_size

                loss = loss_nll / scheduler.pixel_variance + loss_kld

                model.cleargrads()
                loss.backward()
                optimizer.update(current_training_step)

                loss_nll = float(loss_nll.data) + math.log(256.0)
                loss_kld = float(loss_kld.data)

                elbo = -(loss_nll + loss_kld)

                loss_mse = float(
                    cf.mean_squared_error(query_images, mean_x).data)

                printr(
                    "Iteration {}: Subset {} / {}: Batch {} / {} - elbo: {:.2f} - loss: nll: {:.2f} mse: {:.6e} kld: {:.5f} - lr: {:.4e} - pixel_variance: {:.5f} - step: {}  "
                    .format(iteration + 1,
                            subset_index + 1, len(dataset), batch_index + 1,
                            len(iterator), elbo, loss_nll, loss_mse, loss_kld,
                            optimizer.learning_rate, scheduler.pixel_variance,
                            current_training_step))

                scheduler.step(iteration, current_training_step)
                pixel_var[...] = scheduler.pixel_variance**2
                pixel_ln_var[...] = math.log(scheduler.pixel_variance**2)

                total_num_batch += 1
                current_training_step += 1
                mean_kld += loss_kld
                mean_nll += loss_nll
                mean_mse += loss_mse
                mean_elbo += elbo

            model.serialize(args.snapshot_directory)

            # Visualize
            if args.with_visualization:
                axis_data.imshow(make_uint8(query_images[0]),
                                 interpolation="none")
                axis_reconstruction.imshow(make_uint8(mean_x.data[0]),
                                           interpolation="none")

                with chainer.no_backprop_mode():
                    generated_x = model.generate_image(
                        query_viewpoints[None, 0], representation[None, 0])
                    axis_generation.imshow(make_uint8(generated_x[0]),
                                           interpolation="none")
                plt.pause(1e-8)

        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} - elbo: {:.2f} - loss: nll: {:.2f} mse: {:.6e} kld: {:.5f} - lr: {:.4e} - pixel_variance: {:.5f} - step: {} - time: {:.3f} min"
            .format(iteration + 1, mean_elbo / total_num_batch,
                    mean_nll / total_num_batch, mean_mse / total_num_batch,
                    mean_kld / total_num_batch, optimizer.learning_rate,
                    scheduler.pixel_variance, current_training_step,
                    elapsed_time / 60))
        model.serialize(args.snapshot_directory)
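The Scheduler in this last example anneals the output pixel variance from sigma_start to sigma_end over final_num_updates steps, as in the GQN training recipe. A sketch of the interpolation it plausibly performs (the real class also snapshots its state to snapshot_directory):

def annealed_sigma(step: int, sigma_start: float, sigma_end: float,
                   final_num_updates: int) -> float:
    """Linearly anneal pixel sigma, clamping at sigma_end (sketch)."""
    t = min(step / final_num_updates, 1.0)
    return sigma_start + (sigma_end - sigma_start) * t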