def __init__(self, env_fns, spaces=None):
    """
    If you don't specify observation_space, we'll have to create a dummy
    environment to get it.
    """
    if spaces:
        observation_space, action_space = spaces
    else:
        logger.log('Creating dummy env object to get spaces')
        with logger.scoped_configure(format_strs=[]):
            dummy = env_fns[0]()
            observation_space, action_space = dummy.observation_space, dummy.action_space
            dummy.close()
            del dummy
    VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
    self.obs_bufs = [
        {k: Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}
        for _ in env_fns]
    self.parent_pipes = []
    self.procs = []
    for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
        wrapped_fn = CloudpickleWrapper(env_fn)
        parent_pipe, child_pipe = Pipe()
        proc = Process(target=_subproc_worker,
                       args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
        proc.daemon = True
        self.procs.append(proc)
        self.parent_pipes.append(parent_pipe)
        proc.start()
        child_pipe.close()
    self.waiting_step = False
    self.viewer = None
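A minimal usage sketch for the constructor above, assuming it belongs to the shared-memory vectorized environment in OpenAI baselines (ShmemVecEnv) and that gym is installed; the import path and environment id are assumptions, not part of the original snippet.

import gym
from baselines.common.vec_env.shmem_vec_env import ShmemVecEnv  # assumed import path

def make_env():
    return gym.make('CartPole-v1')  # illustrative environment id

if __name__ == '__main__':
    # Passing spaces explicitly skips the dummy-environment branch in __init__.
    probe = make_env()
    spaces = (probe.observation_space, probe.action_space)
    probe.close()

    vec_env = ShmemVecEnv([make_env for _ in range(4)], spaces=spaces)
    obs = vec_env.reset()
    vec_env.close()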
Example #2
def default(username, subject, body):
    color.print_color(
        'yellow',
        '-------------------------------\n' + "             DEFAULT\n" +
        "username: "******"\n" + "subject:  " + subject + "\n" +
        "body:     " + body + "\n" + '-------------------------------\n\n')
    logger.log(username + ' submitted an incorrect command')
Example #3
    def make_blocked(self):
        ''' Little helper that removes the process from the ready list if it's in there and marks it blocked '''
        if self in kernel_data.READY_PROCESSES.processes():
            kernel_data.READY_PROCESSES.remove_process(self)
        kernel_data.BLOCKED_PROCESSES.add(self)
        self.state = BLOCKED_STATE
        logger.log("Process: " + self.name + " was blocked")
Example #4
def feedback(username, user_feedback):
    color.print_color(
        'yellow',
        '-------------------------------\n' + '            FEEDBACK\n' +
        'username: ' + username + "\n" + 'feedback: ' + user_feedback + "\n" +
        '-------------------------------\n\n')
    logger.log(username + ' submitted feedback')
Example #5
def schedule():
    '''
      1. change the active process if the active one is blocked
    '''
    if kernel_data.ACTIVE_PROCESS.state == BLOCKED_STATE:
        kernel_data.BLOCKED_PROCESSES.add(kernel_data.ACTIVE_PROCESS)

    if kernel_data.READY_PROCESSES.not_waiting_processes() != [] and kernel_data.ACTIVE_PROCESS.state != RUNNING_STATE:
        p = kernel_data.READY_PROCESSES.get()
        p.state = RUNNING_STATE
        kernel_data.ACTIVE_PROCESS = p
    else: # only blocked processes are left
        # change active process only if it was blocked
        if kernel_data.ACTIVE_PROCESS.state == BLOCKED_STATE:
            kernel_data.ACTIVE_PROCESS = kernel_data.BLOCKED_PROCESSES.get()
            kernel_data.ACTIVE_PROCESS.state = RUNNING_STATE

        # cleaner:
        # check all resources; for each free one, make the processes waiting on it ready
        logger.log("CLEANER ACTIVATED: ")
        for res in kernel_data.RESOURCES.resources():
            if res.free():
                for proc in kernel_data.BLOCKED_PROCESSES.processes():
                    if res.awaiting_processes.processes():
                        lucky_process = res.awaiting_processes.get()
                        lucky_process.make_ready()
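schedule() and the process helpers in this listing lean on kernel_data.READY_PROCESSES / BLOCKED_PROCESSES containers whose definitions are not shown. Below is a minimal sketch of the interface they appear to expose; every name is inferred from the calls above, not taken from the original source.

class ProcessList:
    ''' Hypothetical container matching the calls made by the kernel code above. '''

    def __init__(self):
        self._items = []

    def processes(self):
        # Return the underlying list so membership tests and iteration work.
        return self._items

    def add(self, process):
        self._items.append(process)

    def remove(self, process):
        # Return True if something was actually removed.
        if process in self._items:
            self._items.remove(process)
            return True
        return False

    remove_process = remove  # the code above uses both names

    def get(self):
        # Pop the first process (FIFO), or None when empty.
        return self._items.pop(0) if self._items else None

    def not_waiting_processes(self):
        # Processes not blocked on a resource that still has to be created.
        return [p for p in self._items if getattr(p, 'awaiting_for_creation', None) is None]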
Example #6
def get_number_of_clusters(dataset):
    cluster_range = range(2, 21)
    silhouettes = []

    logger.log("Defining number of clusters ...", True)

    for num_clusters in cluster_range:
        model = KMeans(n_clusters=num_clusters,
                       init='k-means++',
                       n_init=50,
                       max_iter=300,
                       tol=0.001,
                       precompute_distances=True,
                       verbose=0,
                       random_state=None,
                       copy_x=True,
                       n_jobs=-1)

        model.fit(dataset)

        silhouettes.append(
            metrics.silhouette_score(dataset,
                                     model.labels_,
                                     metric='euclidean'))

    number = get_cut_off_point_by_second_derivate(
        pd.DataFrame(silhouettes)) - 2
    logger.log("selected: " + str(number) + " clusters", True)

    return number
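A small usage sketch for get_number_of_clusters; the precompute_distances and n_jobs arguments above target an older scikit-learn release, so the call below assumes such a version, and the iris data merely stands in for the real dataset. The surrounding module's logger and get_cut_off_point_by_second_derivate helper are assumed to be importable.

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler

X = StandardScaler().fit_transform(load_iris().data)  # placeholder dataset
k = get_number_of_clusters(pd.DataFrame(X))
print("suggested number of clusters:", k)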
Example #7
    async def _remove_deleted_guilds(self):
        '''
        Helper method to purge the settings of deleted guilds from
        the database. The ON CASCADE DELETE option in the database
        also makes this wipe the subscriptions automatically.
        '''
        conn = sqlite3.connect('data.db')
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys = ON')

        c.execute('SELECT guild_id FROM GuildSettings')
        guild_ids = [item[0] for item in c.fetchall()]

        total = len(guild_ids)
        count = 0

        for guild_id in guild_ids:
            if not self.bot.get_guild(guild_id):
                c.execute('DELETE FROM GuildSettings WHERE guild_id = ?',
                          (guild_id, ))
                count += 1

        conn.commit()
        conn.close()

        log(f'Purged {count} out of {total} guilds')
Example #8
def get_redshift_cursor(connection):
    cursor = None
    try:
        cursor = connection.cursor()
    except AttributeError as error:
        log(error)
        exit(1)
    return cursor
Example #9
    async def push(self, ctx, current_hour: int = None):
        # Evaluate the default per call rather than once at import time.
        if current_hour is None:
            current_hour = datetime.datetime.utcnow().hour
        log(f'Manual subscription push requested for {current_hour}:00 GMT')

        if (self.EARLIEST_TIME <= current_hour <= self.LATEST_TIME):
            if (current_hour == self.EARLIEST_TIME): self.regenerate_all()
            await ctx.message.add_reaction('✅')
            await self.push_subscriptions(current_hour)
        else:
            await ctx.message.add_reaction('❌')
Example #10
    def __init__(self, bot):
        self.bot = bot

        # This list is for indexing-display purposes
        self.lectionary_names = [
            'armenian',
            'book of common prayer',
            'catholic',
            'america orthodox',
            'coptic orthodox',
            'greek orthodox',
            'russian orthodox',
            'revised common']

        # Lectionary Objects
        self.lectionaries = [
            ArmenianLectionary(),
            BookOfCommonPrayer(),
            CatholicLectionary(),
            OrthodoxAmericanLectionary(),
            OrthodoxCopticLectionary(),
            OrthodoxGreekLectionary(),
            OrthodoxRussianLectionary(),
            RevisedCommonLectionary()]

        log('Initial data fetch')

        # Initialize the database if it's not ready
        c = conn.cursor()

        # Guild settings table
        c.execute('''
            CREATE TABLE IF NOT EXISTS GuildSettings (
                guild_id BIGINT NOT NULL,
                time     BIGINT NOT NULL,
                PRIMARY KEY (guild_id)
            )
        ''')

        # Subscriptions table
        c.execute('''
            CREATE TABLE IF NOT EXISTS Subscriptions (
                guild_id   BIGINT NOT NULL,
                channel_id BIGINT NOT NULL,
                sub_type   BIGINT NOT NULL,
                FOREIGN KEY (guild_id) REFERENCES GuildSettings(guild_id) ON DELETE CASCADE
            )
        ''')
        
        conn.commit()

        # Start up the event loop
        self.last_fufill = datetime.datetime.utcnow().hour
        self.fufill_subscriptions.start()

        log(f'Bot booted. Will not fufill subscriptions for {self.last_fufill}:00 GMT or prior.')
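A hedged sketch of how the tables created above might be written to. The table and column names come from the CREATE statements; the helper itself, the data.db path (taken from the other snippets here) and the default time of 7 are purely illustrative.

import sqlite3

def add_subscription(guild_id, channel_id, sub_type, db_path='data.db'):
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute('PRAGMA foreign_keys = ON')
    # The guild row must exist first, since Subscriptions references GuildSettings.
    c.execute('INSERT OR IGNORE INTO GuildSettings (guild_id, time) VALUES (?, ?)',
              (guild_id, 7))
    c.execute('INSERT INTO Subscriptions (guild_id, channel_id, sub_type) VALUES (?, ?, ?)',
              (guild_id, channel_id, sub_type))
    conn.commit()
    conn.close()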
Example #11
def render_links():
    log("Request received for links at {}".format(datetime.datetime.now()))

    links_blob = blob.read_blob('links.json')
    links_data = link.parse_title(links_blob)
    blob.write_blob("links.json", links_data)

    log("Request finished for links at {}".format(datetime.datetime.now()))
    return ""
Example #12
    async def shutdown(self, ctx):
        '''
        Command to safely shut down the bot with a reduced chance of
        damaging the database. (Bot owner only.)
        '''
        self.fufill_subscriptions.stop()
        await ctx.message.add_reaction('✅')
        log('Shutdown request, logging out')
        await ctx.bot.close()
Example #13
def copy_s3_file_to_redshift(app, connection):
    redshift_query = s3_to_redshift(app)
    cursor = connection.cursor()
    try:
        cursor.execute(redshift_query)
        log('Copy Command executed successfully')
    except Exception:
        log('Failed to execute copy command: exiting')
        exit(1)
    connection.close()
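A hedged sketch of wiring the two Redshift helpers together. psycopg2 as the driver and every connection parameter are assumptions; s3_to_redshift(app) is expected to come from the surrounding module.

import psycopg2

def run_copy(app):
    # All connection details below are placeholders.
    connection = psycopg2.connect(host='example-cluster.redshift.amazonaws.com',
                                  port=5439,
                                  dbname='dev',
                                  user='awsuser',
                                  password='secret')
    copy_s3_file_to_redshift(app, connection)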
Example #14
def read_blob(blob_name):
    log("Reading blob: {}".format(blob_name))
    blob = None
    try:
        block_blob_service = BlockBlobService(account_name=account_name,
                                              account_key=account_key)
        blob = block_blob_service.get_blob_to_text(container_name, blob_name)
    except Exception as e:
        log("Error while reading blob..{}".format(e))
    return blob.content if blob else None
Example #15
def get_results(x_selected, number_of_clusters=None):

    if number_of_clusters is None:
        best_number_of_clusters = get_k_by_gap_statistic(x_selected)
    else:
        best_number_of_clusters = number_of_clusters

    logger.log("Number of clusters: " + str(best_number_of_clusters), True)

    return get_avg_sil(x_selected, best_number_of_clusters)
Example #16
def render_visning():
    log("Request received for visnings at {}".format(datetime.datetime.now()))

    link_blob = blob.read_blob('links.json')
    visning_blob = blob.read_blob('visning.json')
    visnings = visning.parse_visning(link_blob, visning_blob)
    blob.write_blob("visning.json", visnings)

    log("Request finished for visnings at {}".format(datetime.datetime.now()))
    return ""
Example #17
def create_process(proc_class, father_proc, opts=None):
    ''' Creates a process.

      1. create an instance
      2. add the created instance to the ready processes list
    '''
    if opts is None:
        opts = {}
    opts['parent'] = father_proc
    process = proc_class(opts)  # the parent option must be set
    father_proc.children.append(process)
    process.make_ready()
    logger.log("Process created: " + process.name)
Example #18
def render_sold():
    log("Request received for sold at {}".format(datetime.datetime.now()))

    link_blob = blob.read_blob('links.json')
    sold_blob = blob.read_blob('sold.json')
    sold_data = sold.parse_sold(link_blob, sold_blob)

    blob.write_blob("sold.json", sold_data)

    log("Request finished for sold at {}".format(datetime.datetime.now()))
    return ""
Example #19
def match_exception(username, item, message_id, title, permalink, url):
    color.print_color(
        'red',
        "match exception caught\n" + "username:   "******"\n" +
        #"email:   " + email + "\n" +
        #"twitter:   " + twitter + "\n" +
        "message id: " + message_id + "\n" + "item:       " + item + "\n" +
        "title:      " + title + "\n" + "reddit url: " + permalink + "\n" +
        "sale link:  " + url + "\n" + "stacktrace:\n" +
        traceback.format_exc() + "\n\n")
    logger.log('Match exception caught')
Example #20
    async def push(self, ctx, current_hour: int = None):
        # Evaluate the default per call rather than once at import time.
        if current_hour is None:
            current_hour = datetime.datetime.utcnow().hour
        log(f'Manual subscription push requested for {current_hour}:00 GMT')

        if 7 <= current_hour <= 23:
            self.regenerate_all_data()
            self.build_all_embeds()
            await ctx.message.add_reaction('✅')
            await self.push_subscriptions(current_hour)
        else:
            await ctx.message.add_reaction('❌')
Example #21
def delete_resource(resource):
    '''
        1. remove the resource from its creator's resources list
        2. unblock the processes awaiting this resource
        3. remove the resource from the kernel's resources list
    '''
    logger.log("Resource deleted: " + resource.name)
    resource.creator.created_resources.remove(resource)
    kernel_data.ACTIVE_PROCESS.used_resources.remove(resource)
    for p in resource.awaiting_processes.processes():
        p.make_ready()
    kernel_data.RESOURCES.remove(resource)
Example #22
def write_blob(file_name, text):
    log("Writing blob: {}".format(file_name))
    try:
        block_blob_service = BlockBlobService(account_name=account_name,
                                              account_key=account_key)
        block_blob_service.create_container(container_name)
        # Set the permission so the blobs are public.
        block_blob_service.set_container_acl(
            container_name, public_access=PublicAccess.Container)
        block_blob_service.create_blob_from_text(container_name, file_name,
                                                 text)
    except Exception as e:
        log("Error while writing blob..{}".format(e))
Example #23
def run_GLSPFS(X, state_of_art):
    logger.log("running GLSPFS ...")
    param_population = params.get_GLSPFS_params()
    data = np.copy(X)
    n_features = data.shape[1]

    scores = genetic.select_best_rankings(n_features, param_population,
                                          'GLSPFS', data, None, None,
                                          state_of_art)

    if not state_of_art:
        scores = commons.remove_duplicated_rankings(scores)
    return scores
Example #24
def match(username, email, twitter, item, message_id, title, permalink, url):
    emailmsg = ''
    if email is not None:
        emailmsg = ' and email was sent to ' + email
    color.print_color(
        'magenta',
        "-------------------------------\n" + "        SUBMISSION MATCH\n" +
        "username:   "******"\n" + "email:   " + xstr(email) + "\n" +
        "twitter:   " + xstr(twitter) + "\n" + "message id: " + message_id +
        "\n" + "item:       " + item + "\n" + "title:      " + title + "\n" +
        "reddit url: " + permalink + "\n" + "sale link:  " + url + "\n" +
        '-------------------------------\n\n')
    logger.log('Notified ' + username + ' of match for ' + item + emailmsg)
Example #25
    def make_ready(self):
        ''' Little helper that makes a process ready:

            1. removes it from the blocked processes list if it's there (e.g. not in the list when it is the active process)
            2. adds it to the ready processes list
            3. changes the process state to ready
        '''
        if self in kernel_data.BLOCKED_PROCESSES.processes():
            kernel_data.BLOCKED_PROCESSES.remove(self)
        kernel_data.READY_PROCESSES.add(self)
        self.state = READY_STATE
        if kernel_data.ACTIVE_PROCESS == self:
            logger.log("Process " + self.name + " was made ready")
Example #26
def run_iDetect(X, default_configs):
    logger.log("running iDetect ...")
    param_population = params.get_iDetect_params(default_configs)
    data = np.copy(X)
    n_features = data.shape[0]

    scores = genetic.select_best_rankings(n_features, param_population,
                                          'iDetect', data, None, None,
                                          default_configs)

    if not default_configs:
        scores = commons.remove_duplicated_rankings(scores)
    return scores
Example #27
    def __init__(self, bot):
        self.bot = bot

        # This list is for indexing-display purposes
        # In the database, 0 through 3 correspond to these
        self.lectionary_names = [
            'armenian', 'catholic', 'orthodox', 'revised common'
        ]

        # Lectionary Objects
        self.armenian = ArmenianLectionary()
        self.catholic = CatholicLectionary()
        self.orthodox = OrthodoxLectionary()
        self.rcl = RevisedCommonLectionary()
        log('Initial data fetch')

        self.build_all_embeds()

        conn = sqlite3.connect('data.db')
        c = conn.cursor()
        c.execute('PRAGMA foreign_keys = ON')

        # Set up guild settings table if it isn't already
        c.execute('''
            CREATE TABLE IF NOT EXISTS GuildSettings (
                guild_id INTEGER NOT NULL,
                time     INTEGER NOT NULL,
                PRIMARY KEY (guild_id)
            )
        ''')

        # Set up subscription table if it isn't already
        c.execute('''
            CREATE TABLE IF NOT EXISTS Subscriptions (
                guild_id   INTEGER NOT NULL,
                channel_id INTEGER NOT NULL,
                sub_type   INTEGER NOT NULL,
                FOREIGN KEY (guild_id) REFERENCES GuildSettings(guild_id) ON DELETE CASCADE
            )
        ''')

        conn.commit()
        conn.close()

        # Start up the event loop
        self.last_fufill = datetime.datetime.utcnow().hour
        self.fufill_subscriptions.start()

        log(f'Bot booted; will not fufill subscriptions for today from {self.last_fufill}:00 GMT or prior')
Example #28
def render_price():
    log("Request received for price at {}".format(datetime.datetime.now()))

    link_blob = blob.read_blob('links.json')
    pris_blob = blob.read_blob('pris.json')
    multiple_blob = blob.read_blob('multiplePris.json')
    pris = price.parse_price(link_blob, pris_blob)
    multiple = price.multiple_price_links(multiple_blob, pris_blob)

    blob.write_blob("multiplePris.json", multiple)
    blob.write_blob("pris.json", pris)

    log("Request finished for price at {}".format(datetime.datetime.now()))
    return ""
Example #29
def free_resource(resource, process, semaphore_guarded = False):
    '''
        1. process is removed from resources awaiting processes list
        1.1. resource is removed from process used_resources list
        2. resource planner is called
    '''
    logger.log("Resource " + resource.name + " freed by " + process.name)
    resource.awaiting_processes.remove(process)
    process.used_resources.remove(resource)

    # if the resource is guarded with a semaphore, also release the lock
    if semaphore_guarded:
        resource.sem.v()
        logger.log("RELEASED SEMAPHORE LOCK on: " + resource.name)
    resource.distribute()
Example #30
def cleanup_sold(sold_blob, pris_blob):
    sold_links = []
    sold = json.loads(sold_blob)
    for link in sold['links']:
        sold_links.append(link['link'])

    price_data = json.loads(pris_blob)
    count = 0
    for item in list(price_data['links']):
        if item['link'] in sold_links and len(item['price_list']) < 2:
            log("Deleting link from pris: {}".format(item['link']))
            del price_data['links'][count]
        else:
            count = count + 1
    data = json.dumps(price_data, indent=4, sort_keys=True, ensure_ascii=False)
    return data
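A self-contained sketch of cleanup_sold on inline JSON; the field names mirror the ones the function reads, the URLs are made up, and the module's log helper is assumed to be available.

import json

sold_blob = json.dumps({'links': [{'link': 'https://example.com/ad/1'}]})
pris_blob = json.dumps({'links': [
    {'link': 'https://example.com/ad/1', 'price_list': [100]},      # sold with one recorded price -> dropped
    {'link': 'https://example.com/ad/2', 'price_list': [100, 90]},  # not sold -> kept
]})

print(cleanup_sold(sold_blob, pris_blob))  # only the second link remains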
Example #31
def create_resource(res_class, process, opts=None):
    ''' Create a resource.

      1. create a new instance of the resource
      2. add the created resource to its creator's created resources list
      3. add the created resource to the kernel's resources list
    '''
    if opts is None:
        opts = {}
    opts['creator'] = process
    resource = res_class(opts)
    process.created_resources.add(resource)
    kernel_data.RESOURCES.add(resource)
    logger.log("Resource created: " + resource.name)

    # update blocked processes
    for proc in kernel_data.BLOCKED_PROCESSES.processes():
        if proc.is_waiting_for(res_class):
            proc.awaiting_for_creation = None
            proc.make_ready()
Example #32
def terminate_process(process):
    '''
      1. child processes are terminated
      2. resources used by the process are freed
      3. resources created by this process are destroyed
      4. the process is removed from its parent's child process list
      5. the process is removed from the kernel's process lists
    '''
    logger.log("Process terminated: " + process.name)
    for child in process.children:
        kernel.terminate_process(child)

    temp = process.used_resources.resources()
    for res in temp:
        kernel.free_resource(res, process)

    temp = process.created_resources.resources()
    for res in temp:
        kernel.delete_resource(res)

    if process.__class__.__name__ != 'Root':
        process.parent.children.remove(process)

    kernel_data.READY_PROCESSES.remove(process) or kernel_data.BLOCKED_PROCESSES.remove(process)
Example #33
    def distribute(self):
        ''' Algorithm that distributes the resource. (Resource planner) '''
        logger.log("DISTRIBUTOR called on: " + self.name)
        if self.free():
            logger.log("GOT TRUE")
            pr = self.awaiting_processes.get()
            # if there are processes waiting for this resource,
            # give the resource to one of them
            if pr is not None:
                pr.used_resources.add(self)
                pr.make_ready()
                self.awaiting_processes.remove(pr)
                # lock_with_semaphore when asking_for_resource:
                # if the resource is guarded with a semaphore, create the lock
                if hasattr(self, 'sem'):
                    self.sem.v()
                    logger.log("LOCKED SEMAPHORE on " + self.name)
        else:
            kernel_data.ACTIVE_PROCESS.state = BLOCKED_STATE
            self.awaiting_processes.add(kernel_data.ACTIVE_PROCESS)
            logger.log("GOT FALSE")
        kernel.schedule()
Example #34
def ask_for_resource(resource, process, res_class):
    ''' 
        1. process is blocked and added to resources awaiting processes list
        2. resource planner is called
    ''' 
    if resource is not None:
        logger.log("Process: " + process.name + " asked for resource " + resource.name)
        resource.awaiting_processes.add(process)
        process.awaiting_for_creation = None
        resource.distribute()
    else:
        # there is no such resource created
        logger.log("Process: " + process.name + " asked for resource that is not yet created")
        kernel_data.ACTIVE_PROCESS.state = BLOCKED_STATE
        logger.log(process.name + " now waits for resource " + str(res_class) +" to be created")
        process.awaiting_for_creation = res_class
        kernel.schedule()