Example #1
 def close(self):
     info("closed game %s after %s turns (played for %s minutes)", self.id,
          self.time, self.get_nb_minutes())
     self.cancel()
     self.server.log_status()
     if config.record_games:
         self.f.close()
Example #2
 def Closing(self, event): # Cleanup cleanup, everybody do your share
     #self.thread.StopThreads()
     #self.thread.Destroy()
     log.info(_("Cleaning up plugins"))
     for name, instance, type, version in plugins.InterfacePluginList:
         instance.cleanup()
     log.info(_("Shutting down"))
     self.Destroy()
Example #3
def coordlink(request):
	world = World.objects.get(name=request.POST['namespace'])
	if not permissions.can_coordlink(request.user, world):
		return response_403()
	tileY, tileX = int(request.POST['tileY']), int(request.POST['tileX'])
	tile, _ = Tile.objects.get_or_create(world=world, tileY=tileY, tileX=tileX)
	if tile.properties.get('protected'):
		if not permissions.can_admin(request.user, world):
			# TODO: log?
			return HttpResponse('')
	# Must convert to str because that's how JsonField reads the existing keys
	charY = int(request.POST['charY'])
	charX = int(request.POST['charX'])
	assert charY < Tile.ROWS
	assert charX < Tile.COLS
	charY, charX = str(charY), str(charX)
	link_tileY = str(int(request.POST['link_tileY']))
	link_tileX = str(int(request.POST['link_tileX']))
	if 'cell_props' not in tile.properties:
		tile.properties['cell_props'] = {}
	if charY not in tile.properties['cell_props']:
		tile.properties['cell_props'][charY] = {}
	if charX not in tile.properties['cell_props'][charY]:
		tile.properties['cell_props'][charY][charX] = {}
	tile.properties['cell_props'][charY][charX]['link'] = {
			'type': 'coord',
			'link_tileY': link_tileY,
			'link_tileX': link_tileX,
			}
	tile.save()
	log.info('ACTION:COORDLINK %s %s %s %s %s %s %s' % (world.id, tileY, tileX, charY, charX, link_tileY, link_tileX))
	return HttpResponse('')
Example #4
def urllink(request):
	# TODO: factor out w/above
	world = World.objects.get(name=request.POST['namespace'])
	if not permissions.can_urllink(request.user, world):
		return response_403()
	tileY, tileX = int(request.POST['tileY']), int(request.POST['tileX'])
	tile, _ = Tile.objects.get_or_create(world=world, tileY=tileY, tileX=tileX)
	if tile.properties.get('protected'):
		if not permissions.can_admin(request.user, world):
			# TODO: log?
			return HttpResponse('')
	# Must convert to str because that's how JsonField reads the existing keys
	charY = int(request.POST['charY'])
	charX = int(request.POST['charX'])
	assert charY < Tile.ROWS
	assert charX < Tile.COLS
	charY, charX = str(charY), str(charX)
	url = request.POST['url'].strip()
	if not urlparse.urlparse(url)[0]: # no scheme
		url = 'http://' + url
	if 'cell_props' not in tile.properties:
		tile.properties['cell_props'] = {}
	if charY not in tile.properties['cell_props']:
		tile.properties['cell_props'][charY] = {}
	if charX not in tile.properties['cell_props'][charY]:
		tile.properties['cell_props'][charY][charX] = {}
	tile.properties['cell_props'][charY][charX]['link'] = {
			'type': 'url',
			'url': url,
			}
	tile.save()
	log.info('ACTION:URLLINK %s %s %s %s %s %s' % (world.id, tileY, tileX, charY, charX, url))
	return HttpResponse('')
Example #5
 def count_blast_hits(self):
     """Make sure we have blast hits."""
     count = db.sra_blast_hits_count(
         self.state['cxn'], self.state['iteration'])
     log.info('{} blast hits in iteration {}'.format(
         count, self.state['iteration']))
     return count
Example #6
def filter_contigs(assembler):
    """Remove junk from the assembled contigs."""
    log.info('Saving assembled contigs: iteration {}'.format(
        assembler.state['iteration']))

    blast_db = blast.temp_db_name(
        assembler.state['iter_dir'], assembler.state['blast_db'])

    hits_file = blast.output_file_name(
        assembler.state['iter_dir'], assembler.state['blast_db'])

    blast.create_db(
        assembler.state['iter_dir'], assembler.file['output'], blast_db)

    blast.against_contigs(
        blast_db,
        assembler.state['query_target'],
        hits_file,
        protein=assembler.args['protein'],
        db_gencode=assembler.args['db_gencode'],
        temp_dir=assembler.args['temp_dir'])

    save_blast_against_contigs(assembler, hits_file)

    all_hits = {row['contig_id']: row
                for row
                in db.get_contig_blast_hits(
                    assembler.state['cxn'],
                    assembler.state['iteration'])}

    return save_contigs(assembler, all_hits)
Example #7
 def _resultConsumer(self, delayedResult):
     jobID = delayedResult.getJobID()
     assert jobID == self.jobID
     try:
         result = delayedResult.get()
     except Exception, exc:
         log.info( "Result for job %s raised exception: %s" % (jobID, exc) )
         return
Example #8
 def unregister(self):
     try:
         info("unregistering server...")
         s = urllib.urlopen(UNREGISTER_URL + "?ip=" + self.ip).read()
     except:
         s = "couldn't access to the metaserver"
     if s:
         warning("couldn't unregister from the metaserver (%s)", s[:80])
Example #9
 def nothing_assembled(self):
     """Make there is assembler output."""
     if not exists(self.file['output']) \
             or not getsize(self.file['output']):
         log.info('No new assemblies in iteration {}'.format(
             self.state['iteration']))
         return True
     return False
Example #10
def start_server_and_connect(parameters):
    info("active threads: %s", threading.enumerate())
    ServerInAThread(parameters).start()
    time.sleep(.01) # Linux needs a small delay (at least on the Eee PC 4G)
    revision_checker.start_if_needed()
    connect_and_play()
    info("active threads: %s", threading.enumerate())
    sys.exit()
Example #11
def handle_message(message):
    if 'text' in message and message['entities']:
        return handle_message_with_entities(message)
    elif 'friends' in message:
        log.info('Got %d friends on startup', len(message['friends']))
    elif 'delete' in message:
        pass
    else:
        log.warn('Skipping message: %r', message)
Example #12
 def initialize(self):
     do('rm -f %s/*' % config.config['main']['populationPath'])
     do('mkdir %s' % config.config['main']['populationPath'])
     do('echo > %s' % config.config['main']['logPath'])
     for i in range(0, config.config['ga']['populationSize']):
         genome = self.genomeType()
         genome.create()
         self.genomes.append(genome)
     log.info('initialized population of %d genomes' % config.config['ga']['populationSize'])
Example #13
 def check_count_limit(self, type_name):
     t = self.world.unit_class(type_name)
     if t is None:
         info("couldn't check count_limit for %r", type_name)
         return False
     if t.count_limit == 0:
         return True
     if self.future_count(t.type_name) >= t.count_limit:
         return False
     return True
Example #14
 def _delay(self):
     max_delay = max([p.delay for p in self.human_players])
     if max_delay > .6:
         info("max_delay=%s => max_delay=.6")
         max_delay = .6
     turn_duration = VIRTUAL_TIME_INTERVAL / 1000.0 / float(self.speed)
     nb_turns = int(max_delay / turn_duration) + 1
     info("max_delay=%s turn_duration=%s => %s buffered turns", max_delay,
          turn_duration, nb_turns)
     return nb_turns
Example #15
 def quit_game(self, client): # called by a client already out of the game interface
     info("%s has quit from game %s after %s turns", client.login, self.id, self.time)
     self.human_players.remove(client)
     client.state = InTheLobby()
     if self.human_players:
         # remove the queue, and update the orders
         del self.all_orders[client]
         self._dispatch_orders_if_needed()
     else:
         self.close()
Example #16
 def queue_command(self, player, s):
     if player not in self.player.world.players:
         info("didn't send the order for player %s: %s", player, s)
         return
     if self.game_session.record_replay:
         player_index = self.player.world.players.index(player)
         self.game_session.replay_write(" ".join(map(str, (self.player.world.time, player_index, s))))
     if not hasattr(self, "_orders"):
         self._orders = []
     self._orders.append((player, s))
Example #17
def refresh_tweetstream(tweetstream_key):

    log.info("refreshing tweetstream %s" % tweetstream_key)

    # the stream to update
    stream = twitter.TweetStream.get_by_urlsafe(tweetstream_key)

    # refresh the query
    s = Twitter.search(qs=stream.stream_query)  # based on the query string
    s.fetch(process_tweets, {"stream-key": tweetstream_key})
Example #18
 def _play(self):
     if not self._plan: return
     if self.watchdog and self.world.time > \
        self._previous_linechange + self.watchdog * 1000:
         self._line_nb += 1
     self._line_nb %= len(self._plan)
     line = self._plan[self._line_nb]
     cmd = line.split()
     if cmd:
         if cmd[0] == "goto":
             if re.match("^[+-][0-9]+$", cmd[1]):
                 self._line_nb += int(cmd[1])
             elif "label " + cmd[1] in self._plan:
                 self._line_nb = self._plan.index("label " + cmd[1])
             elif re.match("^[0-9]+$", cmd[1]):
                 self._line_nb = int(cmd[1])
             else:
                 warning("goto: wrong destination: %s", cmd[1])
                 self._line_nb += 1
         elif cmd[0] == "label":
             self._line_nb += 1
             info(cmd[1])
         elif cmd[0] == "goto_random":
             dest = worldrandom.choice(cmd[1:])
             if "label " + dest in self._plan:
                 self._line_nb = self._plan.index("label " + dest)
             else:
                 warning("goto_random: label not found: %s", dest)
                 self._line_nb += 1
         elif cmd[0] == "attack":
             self.attack()
             self._line_nb += 1
         elif cmd[0] in ("retaliate", "watchdog", "constant_attacks",
                         "research", "teleportation", "send_soldiers_to_base", "raise_dead"):
             setattr(self, cmd[0], int(cmd[1]))
             self._line_nb += 1
         elif cmd[0] == "get":
             n = 1
             done = True
             for w in cmd[1:]:
                 if re.match("^[0-9]+$", w):
                     n = int(w)
                 elif w in rules.classnames():
                     if not self.get(n, self.equivalent(w)):
                         done = False
                         break
                     n = 1
                 else:
                     warning("get: unknown unit: '%s' (in ai.txt)", w)
                     n = 1
             if done:
                 self._line_nb += 1
         else:
             warning("unknown command: '%s' (in ai.txt)", cmd[0])
             self._line_nb += 1
Example #19
def protect(request):
	world = World.objects.get(name=request.POST['namespace'])
	if not permissions.can_admin(request.user, world):
		return response_403()
	tileY, tileX = request.POST['tileY'], request.POST['tileX']
	# TODO: select for update
	tile, _ = Tile.objects.get_or_create(world=world, tileY=tileY, tileX=tileX)
	tile.properties['protected'] = True
	tile.save()
	log.info('ACTION:PROTECT %s %s %s' % (world.id, tileY, tileX))
	return HttpResponse('')
Example #20
 def load(self):
     genome_files = glob.glob(config.config['main']['populationPath'] + '*_genome.obj')
     if len(genome_files) == 0:
         raise Exception('no genomes found under '+config.config['main']['populationPath'])
     for genome_file in genome_files:
         try:
             genome = pickle.load(open(genome_file))
             self.genomes.append(genome)
         except EOFError:
             log.error("could not load genome from file %s" % genome_file)
     log.info('loaded %d genomes' % len(self.genomes))
Example #21
 def _register(self):
     try:
         s = urllib.urlopen(REGISTER_URL + "?version=%s&login=%s&ip=%s&port=%s" %
                            (VERSION, self.login, self.ip,
                             options.port)).read()
     except:
         s = "couldn't access to the metaserver"
     if s:
         warning("couldn't register to the metaserver (%s)", s[:80])
     else:
         info("server registered")
Example #22
    def handleGet(self, event):
        """Compute result in separate thread, doesn't affect GUI response."""
        #self.buttonGet.Enable(False)
        #self.buttonAbort.Enable(True)
        self.abortEvent.clear()
        self.jobID += 1

        log.info( "Starting job %s in producer thread: GUI remains responsive"
                  % self.jobID )
        delayedresult.startWorker(self._resultConsumer, self._resultProducer,
                                  wargs=(self.jobID,self.abortEvent), jobID=self.jobID)
Example #23
 def no_new_contigs(self, count):
     """Make the are new contigs in the assembler output."""
     if count == db.iteration_overlap_count(
             self.state['cxn'],
             self.state['iteration'],
             self.args['bit_score'],
             self.args['contig_length']):
         log.info('No new contigs were found in iteration {}'.format(
             self.state['iteration']))
         return True
     return False
Example #24
 def installPacks(self, projdir, packnames):
     packsdir = os.path.join(projdir, 'packages')
     run = self.__runRoot('xterm', 
                 '-e sh -c \"apt-get -y -o dir::cache::archives=\\\"%s\\\" ' \
                 '--allow-unauthenticated install %s; echo \\\"Press [ENTER] to exit.\\\"; ' \
                 'read x\"' % (packsdir, packnames))
     if run[0] != 0:
         log.error(_('exit code:%i\n%s' % (run[0], run[1])))
         log.info(_('Failed to start Xterm as root'))
         return False
     #log.info(_('installCache: success'))
     return True
Example #25
def main():
    try:
        for i, message in enumerate(streamer.iter_stream(STREAM_URL)):
            handle_message(message)
            if i and i % 100 == 0:
                log.info('Processed %d messages', i)
    except KeyboardInterrupt:
        log.info('Bye bye!')
    except Exception, e:
        log.exception('Error handling message: %s', e)
        log.warn('Exiting...')
        return 1
Example #26
def start_server_and_connect(parameters):
    info("active threads: %s", threading.enumerate())
    ServerInAThread(parameters).start()
    # TODO: catch exceptions raised by the starting server
    # for example: RegisteringError ProbablyNoInternetError
    # voice.alert([4049]) # "The server couldn't probably register on the metaserver. check you are connected to the Internet."
    # voice.alert([4080]) # "failure: the server couldn't start"
    time.sleep(.01) # Linux needs a small delay (at least on the Eee PC 4G)
    revision_checker.start_if_needed()
    connect_and_play()
    info("active threads: %s", threading.enumerate())
    sys.exit()
Example #27
 def increase(self):
     for param in self.params:
         old = param.currentValue
         param.increase()
         config.config['ga'][param.name] = param.currentValue
         if old != param.currentValue:
             log.info('changed %s from %s to %s'%(param.name, str(old), str(param.currentValue)))
             if param.name == 'genomeSize':
                 for param2 in self.params:
                     if param2.name != 'genomeSize':
                         param2.resetToOriginalValue()
                 log.info('genomeSize change triggered resetting of the other params')
                 break
Example #28
 def remove_client(self, client):
     info("disconnect: %s" % client.login)
     client.is_disconnected = True
     if client in self.clients: # not anonymous
         self.clients.remove(client)
         for c in self.players_not_playing():
             c.send_msg([client.login, 4259]) # ... has just disconnected
         self.update_menus()
     if isinstance(client.state, Playing):
         client.cmd_abort_game([])
     if self._is_admin(client) and not self.is_standalone:
         info("the admin has disconnected => close the server")
         sys.exit()
     self.log_status()
Example #29
def walkIndicatorItems(ind):
    global level,iocEvalString, cacheItems
    cache=False

    lastii=ind.findall("./*[local-name()='IndicatorItem']")[-1]

    # if we are looking for a FileItem/RegistryItem, cache the attributes of any hit for later use
    searches=walksearches(ind)
    if ('FileItem/' in str(searches)) or ('RegistryItem/' in str(searches)):
        cache=True
        
    for i in ind.findall("./*[local-name()='IndicatorItem']"):        
        #do we know how to handle this IOC?
        #split it into category/attribute
        itemTarget=i.Context.attrib.get("search")        
        iocMajorCategory=itemTarget.split('/')[0]
        iocAttribute=itemTarget.split('/')[-1]
        # some IOC attributes are all lower case, capitalized, camel case, etc.; normalize to lower case
        iocAttribute=iocAttribute.lower()
        #optimistic result default. change if you are pessimistic ;-]
        iocResult= False        
        #let python tell us what functions we support by eval'ing our include iocItems modules.
        if iocMajorCategory in dir(iocItems):
            if iocAttribute in dir(eval(iocMajorCategory)):
                #tell the function about items we've cached? 
                if 'cacheItems' in eval(iocMajorCategory + '.' + iocAttribute + '.func_code.co_varnames'):
                    #iocResult=eval(iocMajorCategory + '.' + iocAttribute + '("' + str(i.Content) + '")')
                    if cache:
                        iocResult=eval("%s.%s(r'%s',cacheItems,True)" %(iocMajorCategory,iocAttribute,i.Content))
                        debug('cache items: %s' %(str(cacheItems)))
                    else:
                        iocResult=eval("%s.%s(r'%s',[],False)" %(iocMajorCategory,iocAttribute,i.Content))

                else:
                    #iocResult=eval(iocMajorCategory + '.' + iocAttribute + '("' + str(i.Content) + '")')
                    iocResult=eval("%s.%s(r'%s')" %(iocMajorCategory,iocAttribute,i.Content))
            else:
                debug('cannot evaluate %s'%( (iocMajorCategory + '.' + iocAttribute + '("' + str(i.Content) + '")')))
        else:
            debug('cannot evaluate %s'%( (iocMajorCategory + '.' + iocAttribute + '("' + str(i.Content) + '")')))

        logicOperator=str(i.getparent().attrib.get("operator")).lower()        
        if i == lastii:
            debug('\t'*level + str(iocResult))
            info('\t'*level + i.Context.attrib.get("search") + ' ' + i.attrib.get("condition") + ' ' + str(i.Content))            
            iocEvalString+=' ' + str(iocResult)
        else:
            debug('\t'*level + str(iocResult) +' ' + str(logicOperator))
            info('\t'*level + i.Context.attrib.get("search") + ' ' + i.attrib.get("condition") + ' ' + str(i.Content) + ' ' + str(logicOperator))            
            iocEvalString+=' ' + str(iocResult) + ' ' + str(logicOperator)
Example #30
 def abort_game(self, client): # called by a client already out of the game interface
     info("%s has disconnected from game %s after %s turns", client.login, self.id, self.time)
     self.human_players.remove(client)
     client.state = InTheLobby() # useful if the client just aborted a game but has not disconnected
     if self.human_players:
         # give the last order for the other players
         for p in self.human_players:
             self.all_orders[p].insert(0, ["update" + NEWLINE_REPLACEMENT, None])
         self.all_orders[client].insert(0, ["quit" + NEWLINE_REPLACEMENT, None])
         self._dispatch_orders_if_needed()
         # remove the queue, and update the orders
         del self.all_orders[client]
         self._dispatch_orders_if_needed()
     else:
         self.close()
Example #31
    def delete_snap(self):

        add_test_info.sub_test_info("6", "Delete VM snap")
        snap_to_delete = self.nova_server.snap_delete(self.snap.snap)
        assert snap_to_delete.execute, "Snap deletion error"
        snapshot = self.nova_server.get_snap(self.snap.snap)
        self.timer.wait_for_state_change(snapshot.snap.status, "ACTIVE")

        log.info("status: %s" % snapshot.snap.status)

        snap_exists = self.nova_server.get_snap(self.snap.snap)

        if snap_exists.snap.status == "DELETED":
            log.info("VM snap deleted")
        else:
            log.error("Snap status: %s" % snap_exists.snap.status)
            raise AssertionError("VM snap still exists")

        add_test_info.sub_test_completed_info()
Example #32
    def write_input_files(self):
        """Write blast hits and matching ends to fasta files."""
        log.info('Writing assembler input files: iteration {}'.format(
            self.state['iteration']))

        with open(self.file['paired_1'], 'w') as end_1, \
                open(self.file['paired_2'], 'w') as end_2:

            for row in db.get_blast_hits_by_end_count(self.state['db_conn'],
                                                      self.state['iteration'],
                                                      2):

                self.file['paired_count'] += 1
                out_file = end_1 if row['seq_end'] == '1' else end_2

                out_file.write('>{}/{}\n'.format(row['seq_name'],
                                                 row['seq_end']))
                out_file.write('{}\n'.format(row['seq']))

        with open(self.file['single_1'], 'w') as end_1, \
                open(self.file['single_2'], 'w') as end_2, \
                open(self.file['single_any'], 'w') as end_any:

            for row in db.get_blast_hits_by_end_count(self.state['db_conn'],
                                                      self.state['iteration'],
                                                      1):

                if row['seq_end'] == '1':
                    out_file = end_1
                    seq_end = '/1'
                    self.file['single_1_count'] += 1
                elif row['seq_end'] == '2':
                    out_file = end_2
                    seq_end = '/2'
                    self.file['single_2_count'] += 1
                else:
                    out_file = end_any
                    seq_end = ''
                    self.file['single_any_count'] += 1

                out_file.write('>{}{}\n'.format(row['seq_name'], seq_end))
                out_file.write('{}\n'.format(row['seq']))
Example #33
def handle_upload():
    submit = php_get("submit")

    if submit == "cancel":
        cancel_pending_events()

    elif submit == "commit":
        commit_pending_events()

    elif submit == "submitfile":
        ftype = php_get("fieldType")
        meta = FILES["fieldFile"]
        filename = meta["tmp_name"]
        name = meta["name"]
        events = None
        message = None

        log.info("Handling %s file upload: %s", ftype, filename)

        if ftype == "varioustxt":
            events, sheetcount, rowcount = read_various_text(filename, name)
        elif ftype == "storythemes":
            events, sheetcount, rowcount = read_storythemes(filename)
        elif ftype == "compactstorythemes":
            message = expload_compact_storythemes(filename)
        elif ftype == "storydefinitions":
            events, sheetcount, rowcount = read_stories(filename)
        elif ftype == "themedefinitions":
            events, sheetcount, rowcount = read_themes(filename)
        else:
            message = "Type %s is not yet supported" % ftype

        if message is None and events is not None:
            save_pending_events(events)
            message = "" if events else "Nothing to do."
            message += " Found %s changes in %s sheets and %s rows." % (
                len(events),
                sheetcount,
                rowcount,
            )

        return message
Example #34
    def delete_backup(self, backup):

        add_test_info.sub_test_info('5', 'delete backup')
        backup_to_delete = self.cinder_backup.delete_backup(backup)

        assert backup_to_delete.execute, "Backup delete initialize error"

        backup_exists = self.cinder_backup.get_backup(backup_to_delete)
        self.timer.wait_for_state_change(backup_exists.status, 'deleting')

        log.info('status: %s' % backup_exists.status)
        backup_exists = self.cinder_backup.get_backup(backup_to_delete)

        if not backup_exists.status:
            log.info('backup deleted')
        else:
            log.error('Backup status: %s' % backup_exists.status)
            raise AssertionError("Backup still exists")

        add_test_info.sub_test_completed_info()
Example #35
def create_query_from_contigs(args, assembler):
    """Crate a new file with the contigs used as the next query."""
    log.info('Creating new query files: iteration {}'.format(
        assembler.state['iteration']))

    query_dir = join(args['temp_dir'], 'queries')
    os.makedirs(query_dir, exist_ok=True)

    query_file = assembler.file_prefix() + 'long_reads.fasta'
    query = join(query_dir, query_file)
    assembler.file['long_reads'] = query

    with open(query, 'w') as query_file:
        for row in db.get_assembled_contigs(assembler.state['cxn'],
                                            assembler.state['iteration'],
                                            assembler.args['bit_score'],
                                            assembler.args['contig_length']):
            util.write_fasta_record(query_file, row[0], row[1])

    return query
Example #36
 def remove_client(self, client):
     info("disconnect: %s" % client.login)
     client.is_disconnected = True
     if client in self.clients:  # not anonymous
         self.clients.remove(client)
         for c in self.players_not_playing():
             if client.is_compatible(c):
                 c.send_msg([client.login,
                             4259])  # ... has just disconnected
         self.update_menus()
     if isinstance(client.state, Playing):
         client.cmd_abort_game([])
     elif isinstance(client.state, WaitingForTheGameToStart):
         client.cmd_unregister([])
     elif isinstance(client.state, OrganizingAGame):
         client.cmd_cancel_game([])
     if self._is_admin(client) and not self.is_standalone:
         info("the admin has disconnected => close the server")
         sys.exit()
     self.log_status()
Example #37
 def _validate(self, field, force=False):
     if not field.input:
         return
     try:
         # self._set_field_status(field, self.STATUS_LOADING, '')
         value = field.input.get_value()
         if field.validator:
             result = field.validator(field, value)
             value = value if result is None else result
             log.info('Validation passed for %s.%s', self.namespace,
                      field.name)
         self._set_field_status(field, self.STATUS_OK, '')
     except Exception as err:
         log.warn('Validation Error for %s.%s: %s', self.namespace,
                  field.name, err)
         self._set_field_status(field, self.STATUS_ERROR, str(err))
     finally:
         log.info('Setting value %s.%s: %s', self.namespace, field.name,
                  value)
         field.value = value
Example #38
def sign_release(infile):
    """
    Signs both the clearsign and the detached signature of a Release file.

    Takes a valid path to a release file as an argument.
    """
    args = ['gpg', '-q', '--default-key', signingkey, '--batch', '--yes',
            '--homedir', gpgdir]

    clearargs = args + ['--clearsign', '-a', '-o',
                        infile.replace('Release', 'InRelease'), infile]
    detachargs = args + ['-sb', '-o', infile+'.gpg', infile]

    info('Signing Release (clearsign)')
    cleargpg = Popen(clearargs)
    cleargpg.wait(timeout=5)

    info('Signing Release (detached sign)')
    detachgpg = Popen(detachargs)
    detachgpg.wait(timeout=5)
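A minimal call sketch for the function above (the repository path is hypothetical; signingkey and gpgdir are assumed to be configured at module level, as in the original source):

    # Produces InRelease (clearsigned) and Release.gpg (detached) alongside the input file.
    sign_release('/srv/repo/dists/stable/Release')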
Example #39
    def delete_vol(self, volume):

        add_test_info.sub_test_info('4', 'delete volume')
        vol_to_delete = self.cinder_volume.delete_volume(volume=volume)

        assert vol_to_delete.execute, "snapshot volume delete initialize error"

        volume_exists = self.cinder_volume.get_volume(volume)
        self.timer.wait_for_state_change(volume_exists.status, 'deleting')

        log.info('status: %s' % volume_exists.status)
        volume_exists = self.cinder_volume.get_volume(volume)

        if not volume_exists.status:
            log.info('snapshot volume deleted')
        else:
            log.error('volume status: %s' % volume_exists.volume.status)
            raise AssertionError("snapshot volume still exists")

        add_test_info.sub_test_completed_info()
Example #40
def blast_query_against_all_shards(assembler):
    """
    Blast the query against the SRA databases.

    We're using a map-reduce strategy here. We map the blasting of the query
    sequences and reduce the output into one fasta file.
    """
    log.info('Blasting query against shards: iteration {}'.format(
        assembler.state['iteration']))

    all_shards = shard_fraction(assembler)

    with Pool(processes=assembler.args['cpus']) as pool:
        results = [
            pool.apply_async(blast_query_against_one_shard,
                             (assembler.args, assembler.simple_state(), shard))
            for shard in all_shards
        ]
        all_results = [result.get() for result in results]
    log.info('All {} blast results completed'.format(len(all_results)))
Example #41
 def abort_game(
         self,
         client):  # called by a client already out of the game interface
     info("%s has disconnected from game %s after %s turns", client.login,
          self.id, self.time)
     self.human_players.remove(client)
     client.state = InTheLobby(
     )  # useful if the client just aborted a game but has not disconnected
     if self.human_players:
         # give the last order for the other players
         for p in self.human_players:
             self.all_orders[p].insert(
                 0, ["update" + NEWLINE_REPLACEMENT, None])
         self.all_orders[client].insert(
             0, ["quit" + NEWLINE_REPLACEMENT, None])
         self._dispatch_orders_if_needed()
         # remove the queue, and update the orders
         del self.all_orders[client]
         self._dispatch_orders_if_needed()
     else:
         self.close()
Example #42
    def create_vol(self, name, size):

        add_test_info.sub_test_info('1', 'create volume')

        init_create_volume = self.cinder_volume.create_volume(name, size)

        assert init_create_volume.status, "Volume Create initialize error"

        log.info('volume_name %s' % init_create_volume.vol.name)

        self.timer.wait_for_state_change(init_create_volume.vol.status,
                                         'creating')
        volume = self.cinder_volume.get_volume(init_create_volume.vol)

        assert volume.status, "Volumes Does Exist, hence did not create"

        self.volume = volume.volume

        log.info('volume exist status: %s' % volume.volume.status)

        add_test_info.sub_test_completed_info()
Example #43
def preprocess(args):
    """Build the databases required by atram."""
    log.setup(args['log_file'], args['blast_db'])

    with util.make_temp_dir(where=args['temp_dir'],
                            prefix='atram_preprocessor_',
                            keep=args['keep_temp_dir']) as temp_dir:
        util.update_temp_dir(temp_dir, args)

        with db.connect(args['blast_db'], clean=True) as cxn:
            db.create_metadata_table(cxn)

            db.create_sequences_table(cxn)
            load_seqs(args, cxn)

            log.info('Creating an index for the sequence table')
            db.create_sequences_index(cxn)

            shard_list = assign_seqs_to_shards(cxn, args['shard_count'])

        create_all_blast_shards(args, shard_list)
Example #44
    def snapshot_delete(self):

        add_test_info.sub_test_info('5', 'delete snapshot')
        snap_delete = self.cinder_snap.delete_snapshot(self.snapshot)

        assert snap_delete, "Snapshot delete initialize error"

        snapshot_exists = self.cinder_snap.get_snapshot(self.snapshot)
        self.timer.wait_for_state_change(snapshot_exists.snapshot.status,
                                         'deleting')

        log.info('status: %s' % snapshot_exists.snapshot.status)
        snapshot_exists = self.cinder_snap.get_snapshot(self.snapshot)

        if not snapshot_exists.status:
            log.info('snapshot deleted')
        else:
            log.error('snapshot status: %s' % snapshot_exists.snapshot.status)
            raise AssertionError("snapshot still exists")

        add_test_info.sub_test_completed_info()
Example #45
    def delete_server(self):

        add_test_info.sub_test_info("3", "Delete VM")

        vm_to_delete = self.nova_server.vm_delete(self.vm.vm)
        assert vm_to_delete.execute, "VM deletion error"
        vm_exists = self.nova_server.vm_details(self.vm.vm)
        self.timer.wait_for_state_change(vm_exists.vm.status, "ACTIVE")
        time.sleep(10)

        log.info("status: %s" % vm_exists.vm.status)

        vm_exists = self.nova_server.vm_details(self.vm.vm)

        if not vm_exists.status:
            log.info("VM deleted")
        else:
            log.error("VM status: %s" % vm_exists.vm.status)
            raise AssertionError("VM still exists")

        add_test_info.sub_test_completed_info()
Example #46
    def delete_server(self, server):

        add_test_info.sub_test_info('5', 'Delete VM')

        vm_to_delete = self.nova_server.vm_delete(server)
        assert vm_to_delete.execute, 'VM deletion error'
        vm_exists = self.nova_server.vm_details(server)
        self.timer.wait_for_state_change(vm_exists.vm.status, 'ACTIVE')

        log.info('status: %s' % vm_exists.vm.status)
        time.sleep(10)

        vm_exists = self.nova_server.vm_details(server)

        if not vm_exists.status:
            log.info('VM deleted')
        else:
            log.error('VM status: %s' % vm_exists.vm.status)
            raise AssertionError("VM still exists")

        add_test_info.sub_test_completed_info()
Example #47
def stopRunningJob(status_file,log_dir,lib_dir,branch):
    state_obj = status.getState(status_file)

    cleaned = False
    pid = getPid()
    if pid != None:
        log.info(u"Job with pid '{}' killed.".format(pid))
        os.kill(int(pid), signal.SIGTERM)
        cleaned = True

        # Prepare file
        job.modifyStoppedFile(log_dir, state_obj,branch)
    
    if state_obj["vid"] != None:
        if virtualbox.destroyMachine(state_obj["vid"]):
            cleaned = True
        else:
            log.info(u"Cleaning status file.")
        status.setVID(status_file,None)
        
    virtualbox.checkMachines(lib_dir, False)

    if cleaned:
        if state_obj != None:
            status.setState(status_file,u"cleaned")
            return status.getState(status_file)
    else:
        log.info(u"Nothing stopped.")
        
    return None
Example #48
def exec_test_1():

    uuid.set_env()

    global add_test_info

    add_test_info = AddTestInfo(
        4, "Restore Backup of the volume to a new volume of the same size")
    try:

        add_test_info.started_info()
        cinder_auth = CinderAuth()
        auth = cinder_auth.auth()

        assert auth.status, "Authentication Failed"

        cinder_volume = CindeVolumeTest(auth)

        volume1 = cinder_volume.create_vol("test_volume1", 1)

        backup = cinder_volume.take_backup(volume1, "test_volume1_bkp")

        volume2 = cinder_volume.create_vol("test_volume_2", 1)

        restore = cinder_volume.restore_backup(backup, volume2)

        cinder_volume.delete_vol(volume2)

        cinder_volume.delete_backup(backup)

        cinder_volume.delete_vol(volume1)

        log.info("restore obj %s:" % restore)

        add_test_info.success_status("ok")

    except AssertionError, e:
        log.error(e)
        add_test_info.failed_status("error")
        sys.exit(1)
Example #49
def exec_test_2():

    uuid.set_env()

    global add_test_info

    add_test_info = AddTestInfo(
        5, 'Restore Backup of the volume to a larger volume ')
    try:

        add_test_info.started_info()
        cinder_auth = CinderAuth()
        auth = cinder_auth.auth()

        assert auth.status, "Authentication Failed"

        cinder_volume = CindeVolumeTest(auth)

        volume1 = cinder_volume.create_vol('test_volume1', 1)

        backup = cinder_volume.take_backup(volume1, 'test_volume1_bkp2')

        volume2 = cinder_volume.create_vol('test_volume2', 2)

        restore = cinder_volume.restore_backup(backup, volume2)

        cinder_volume.delete_vol(volume2)

        cinder_volume.delete_backup(backup)

        cinder_volume.delete_vol(volume1)

        log.info('restore obj %s:' % restore)

        add_test_info.success_status('ok')

    except AssertionError, e:
        log.error(e)
        add_test_info.failed_status('error')
        sys.exit(1)
Example #50
def assembly_loop(args, assembler, blast_db, query):
    """Iterate over the assembly processes."""
    for iteration in range(1, assembler.args['iterations'] + 1):
        log.info('aTRAM blast DB = "{}", query = "{}", iteration {}'.format(
            blast_db,
            split(query)[1], iteration))

        assembler.init_iteration(blast_db, query, iteration)

        with util.make_temp_dir(where=assembler.args['temp_root'],
                                prefix=assembler.file_prefix(),
                                keep=args['keep_temp_dir']) as iter_dir:

            assembler.setup_files(iter_dir)

            query = assembly_loop_iteration(args, assembler)

            if not query:
                break

    else:
        log.info('All iterations completed')
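The else branch above belongs to the for loop, not to an if: it runs only when the loop finishes without hitting break (i.e. every requested iteration produced a new query). A standalone sketch of that construct:

    for attempt in range(3):
        if attempt == 99:  # never true, so break is never hit
            break
    else:
        print('loop finished without break')  # this runs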
Example #51
 def _get(self, nb, types):
     self._safe_cnt += 1
     if self._safe_cnt > 10:
         info("AI has trouble getting: %s %s", nb, types)
         self.AI_timer = 100
         return False
     if isinstance(types, str):
         types = [types]
     if self.nb(types) >= nb:
         return True
     for type in types:
         if type.__class__ == str:
             type = self.world.unit_class(type)
         if type is None:
             continue
         if self.nb(self.world.get_makers(type)) > 0:
             self.build_or_train_or_upgradeto(type, nb - self.nb(types))
             break
         elif self.world.get_makers(type):
             self._get(1, self.world.get_makers(type)[0])
             return False
     return False
Example #52
def load_one_file(args, cxn, file_name, ends, seq_end_clamp=''):
    """Load sequences from a fasta/fastq file into the atram database."""
    log.info('Loading "{}" into sqlite database'.format(file_name))

    parser = get_parser(args, file_name)

    with util.open_file(args, file_name) as sra_file:
        batch = []

        for rec in parser(sra_file):
            title = rec[0].strip()
            seq = rec[1]
            seq_name, seq_end = blast.parse_fasta_title(
                title, ends, seq_end_clamp)

            batch.append((seq_name, seq_end, seq))

            if len(batch) >= db.BATCH_SIZE:
                db.insert_sequences_batch(cxn, batch)
                batch = []

        db.insert_sequences_batch(cxn, batch)
Example #53
 def __init__(self, name, bases, dct):
     self.__name__ = name
     self.type_name = name
     self.cls = bases[0]
     if "cost" not in dct and hasattr(self.cls, "cost"):
         dct["cost"] = [0] * rules.get("parameters", "nb_of_resource_types")
     if "sight_range" in dct and dct["sight_range"] == 1 * PRECISION:
         dct["sight_range"] = 12 * PRECISION
         dct["bonus_height"] = 1
         info(
             "in %s: replacing sight_range 1 with sight_range 12 and bonus_height 1",
             name)
     if "special_range" in dct:
         del dct["special_range"]
         dct["range"] = 12 * PRECISION
         dct["minimal_range"] = 4 * PRECISION
         dct["is_ballistic"] = 1
         info(
             "in %s: replacing special_range 1 with range 12, minimal_range 4 and is_ballistic 1",
             name)
     self.dct = dct
     self.init_dict(self)
Example #54
 def sentMessage(self, obj):
     item = self.autoMessage.get((self.className, self.method), None)
     if not item:
         return
     userClassHelper = ClassHelper("UserInfo")
     userInfo = userClassHelper.find_one({"user": self.userId},
                                         {'nickName': 1})
     if not userInfo:
         return
     obj.update(userInfo)
     title = item['title'].format(**obj)
     template = item['template'].format(**obj)
     # TODO
     pushData = {"userid": obj['to'], "title": title, "text": template}
     log.info("Push Data:%s", json.dumps(pushData))
     try:
         res = requests.post(self.pushUrl,
                             data=json.dumps(pushData),
                             headers={'X-MeCloud-Debug': '1'})
         print res.text
     except Exception, ex:
         log.err("Push Err:%s", ex)
Example #55
def add(url, source_id, source_url):
    """Add a URL from a specific source to the datastore. source_id should be
    the Twitter user ID of a source and source_url should be the URL to the
    specific tweet where the URL was found.

    Returns the number of times we've seen the URL.

    Under the hood, information about a URL is recorded in a couple of places:

     1. The source_url is stored under a key composed of the sha1 hashes of
        the source_id and the url. This ensures that we record only the first
        source of a given URL shared by a given user.

     2. The key from above is added to a set under a key composed of the sha1
        hash of the url. The size of this set indicates whether a given link
        is over the threshold. The sort value of the key in the set is the
        timestamp at which we saw the corresponding URL.
    """
    url_hash = sha1_hash(url)
    source_hash = sha1_hash(str(source_id))

    url_key = 'url:' + url_hash
    source_key = 'src:{}:{}'.format(source_hash, url_hash)
    ttl = 60 * 60 * 12

    pipe = DB.pipeline()
    pipe.setnx(source_key, source_url)
    pipe.zadd(url_key, source_key, time.time())
    pipe.zcount(url_key, '-inf', 'inf')
    pipe.expire(url_key, ttl)
    pipe.expire(source_key, ttl)
    source_key_added, url_key_added, url_key_count, _, _ = pipe.execute()

    if not source_key_added:
        log.info('URL %s from user %s already in datastore', url, source_id)

    return url_key_count
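The docstring above spells out the two-key scheme; a minimal usage sketch (the URL, IDs, and THRESHOLD below are hypothetical, not part of the original module):

    count = add('https://example.com/article',
                source_id=12345,
                source_url='https://twitter.com/someone/status/67890')
    THRESHOLD = 5  # hypothetical cutoff for "seen by enough sources"
    if count >= THRESHOLD:
        log.info('URL over threshold after %d sightings', count)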
Example #56
def start_face_cal():
    """
    Start computing face vectors.
    :return:
    """
    media_helper = ClassHelper('Media')
    client = RedisDb.get_connection(dbid=1)
    redis_key = _build_media_redis_key()
    while client.llen(redis_key):
        media_id = client.lpop(redis_key)
        media = media_helper.get(media_id)
        if not media:
            log.err('media not found: %s' % media_id)
            continue
        faces = get_faces_from_media_id(media_id)
        face_ids = []
        for face_info in faces:
            face_info['media'] = media_id
            face = create_face_db(face_info)
            face_ids.append(face['_id'])
        # if no faces were retrieved, skip the update
        if face_ids:
            media_helper.update(media_id, {'$set': {'faces': face_ids}})
            log.info('media face calculate finish: %s' % media_id)
Example #57
def assembly_loop(assembler, blast_db, query):
    """Iterate the assembly processes."""
    for iteration in range(1, assembler.args['iterations'] + 1):
        log.info('aTRAM blast DB = "{}", query = "{}", iteration {}'.format(
            blast_db, query, iteration))

        assembler.initialize_iteration(blast_db, query, iteration)

        os.makedirs(assembler.iter_dir(), exist_ok=True)

        blast_query_against_all_shards(assembler)

        if assembler.blast_only or assembler.no_blast_hits():
            break

        assembler.write_input_files()

        assembler.run()

        if assembler.nothing_assembled():
            break

        high_score = filter_contigs(assembler)

        count = assembler.assembled_contigs_count(high_score)

        if not count:
            break

        if assembler.no_new_contigs(count):
            break

        query = create_query_from_contigs(assembler)

    else:
        log.info('All iterations completed')
Example #58
def showRunningJobs(lib_dir):
    pid = getPid()
    if pid != None:
        log.info(u"Main process is running with pid '{}'.".format(pid))
    else:
        log.info(u"Main process is not running.")
    
    processes = []
    
    ci_result = helper.execCommand("ps -alx | grep 'ci_job_handler'")
    ci_lines = ci_result.stdout.decode("utf-8").split(u"\n")
    formatProcesses(ci_lines,processes)
    
    vm_result = helper.execCommand("ps -alx | grep virtualbox")
    vm_lines = vm_result.stdout.decode("utf-8").split(u"\n")
    formatProcesses(vm_lines,processes)
    
    if len(processes) > 0:
        log.info(u"Following sub processes are running.")
        log.info(u"\n".join(processes))
    else:
        log.info(u"No sub processes are running.")

    virtualbox.checkMachines(lib_dir, True)
Example #59
def fetch_plex_instance(pi_dash, username=None, password=None, host=None):
    username = username or pi_dash.config.get('plexserver', 'username', from_keyring=True)
    password = password or pi_dash.config.get('plexserver', 'password', from_keyring=True)
    host = host or pi_dash.config.get('plexserver', 'host', '')
    if username:
        log.info('Logging into MyPlex with user %s', username)
        user = MyPlexAccount.signin(username, password)
        log.info('Connecting to Plex host: %s', host)
        return user.resource(host).connect()
    log.info('Connecting to Plex host: %s', host)
    return PlexServer(host)
Example #60
def main():
    log.info('Starting thresholderbot...')
    try:
        for i, message in enumerate(streamer.iter_stream(STREAM_URL)):
            handle_message(message)
            if i and i % 100 == 0:
                log.info('Processed %d messages', i)
    except KeyboardInterrupt:
        log.info('Bye bye!')
    except Exception, e:
        log.exception('Error handling message: %s', e)
        log.warn('Exiting...')
        return 1