def get(self, file):
    logger.info('get [{0}].[{1}] to [{2}]'.format(self.remote_path, file, self.local_path))
    if self.local_path:
        local_file = '{0}/{1}'.format(self.local_path, file)
        self.client.get(file, local_file)
    return self
def eval_k(self, algorithm, topks=None, measures=None):
    """Evaluate the algorithm over each cross-validation fold, for every k in topks."""
    if measures is None:
        measures = ["rmse", "mae"]
    logger.info('algorithm:' + algorithm.__class__.__name__)
    eval_results = []
    cur = 0
    for train_set, test_set in self.cross_validation():
        cur += 1
        algorithm.train(train_set)
        if self.save_sim:
            temp_path = '{}/sim/{}/'.format(self.output_path, algorithm.__class__.__name__)
            if not os.path.exists(temp_path):
                os.makedirs(temp_path)
            np.savetxt('{}/fold{}.csv'.format(temp_path, cur), algorithm.sim, fmt='%0.3f')
        eval_result_fold = []
        for k in topks:
            logger.info('k={}'.format(k))
            algorithm.topk = k
            eval_result_fold.append(algorithm.estimate(test_set, measures))
        eval_results.append(eval_result_fold)
        if self.just_test_one:
            break
    self.show_result_k(algorithm, measures, eval_results, topks)
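# cross_validation() used above is not shown; a minimal sketch, assuming the
# dataset is an indexable numpy array and a plain KFold split is acceptable:
from sklearn.model_selection import KFold

def cross_validation(self, n_folds=5):
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=42)
    for train_idx, test_idx in kf.split(self.dataset):
        yield self.dataset[train_idx], self.dataset[test_idx]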
def teardown():
    response = api.query({'ksql': 'show queries;'})
    for query in response.json()[0]['queries']:
        api.query({'ksql': 'terminate {};'.format(query['id'])})
    for entry in stmts_teardown:
        logger.info(entry['stmt'])
        api.query({'ksql': entry['stmt']}, force_exit=False, show_output=False)
def estimate(self, raw_test_dataset, measures):
    with tl.Timer() as t:
        error = self._estimate(raw_test_dataset, measures)
    logger.info("{} algorithm predict process cost {:.3f} sec".format(
        self.__class__.__name__, t.interval))
    return error
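# tl.Timer is not shown here; a minimal sketch of a compatible context manager,
# assuming all the caller needs is the elapsed time exposed as `.interval`:
import time

class Timer:
    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.interval = time.perf_counter() - self._start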
def update_feed(feedid, rset, itemlist: list):
    # cleanup list of new feeds
    _itemlist = itemlist.copy()  # list of new items' links
    olditems = Item.query.filter(Item.feedid == feedid).all()  # list of old items in feed
    for olditem in olditems:
        if str(olditem.link) in _itemlist:
            _itemlist.remove(olditem.link)
    # no new feed items, and none were present before either
    if (len(_itemlist) == 0) and (len(itemlist) == len(olditems)):
        return False
    # delete all old feed items
    # todo: allow configuring per-feed whether old items get deleted
    logger.debug("remove all old items in feed: {}".format(feedid))
    for olditem in olditems:
        db_session.delete(olditem)
    for count, link in enumerate(itemlist, start=1):
        logger.info("processing...: {count} / {length}".format_map({
            'count': count,
            'length': len(itemlist)
        }))
        feeditem = update_feeditem(feedid, link, rset)
        if feeditem:
            db_session.add(feeditem)
    db_session.commit()
    return True
def ssh(self):
    self.client.connect(hostname=self.host_config['hostname'],
                        port=self.host_config['port'],
                        username=self.host_config['username'],
                        password=self.host_config['password'])
    logger.info('login to {0} -> {1}'.format(self.host_config, 'success'))
    return self
def prerun(self, t, verbose=True):
    if verbose:
        msg = "=============\nAlgorithm Time: {}\nCash: {}\nPositions: {}\n".format(
            t, self.get_cash(), self.get_positions()
        )
        logger.info(msg)
    self.t = t
def create_and_add_reply(replyable, response_url, hero_id):
    """Create a reply in reddit format and add it to the comment/submission.

    The reply consists of a link to the response audio file, the response itself,
    a warning about the sound, and an ending added from the config file (post footer).
    Image is currently ignored because the new reddit redesign does not render
    flairs properly.

    :param replyable: The comment/submission on reddit
    :param response_url: The url to the response audio file
    :param hero_id: The hero_id to which the response belongs
    :return: None
    """
    original_text = replyable.body if isinstance(replyable, Comment) else replyable.title
    original_text = original_text.strip()
    if '>' in original_text:
        original_text = get_quoted_text(original_text).strip()
    if '::' in original_text:
        original_text = original_text.split('::', 1)[1].strip()
    hero_name = db_api.get_hero_name(hero_id)
    reply = "[{}]({}) (sound warning: {}){}".format(
        original_text, response_url, hero_name, config.COMMENT_ENDING)
    replyable.reply(reply)
    logger.info("Replied to: " + replyable.fullname)
def update_reply(replyable, response_info):
    """Edit and update an existing response comment by the bot with a new hero as requested.

    :param replyable: The comment/submission on reddit
    :param response_info: ResponseInfo containing hero_id and link for response
    :return: None
    """
    bot_comment = replyable.parent()
    root_replyable = bot_comment.parent()
    # TODO maybe get original text from bot's command, rather than the original post,
    # as it might be edited by the time this command is called
    original_text = root_replyable.body if isinstance(root_replyable, Comment) else root_replyable.title
    original_text = original_text.strip()
    if '>' in original_text:
        original_text = get_quoted_text(original_text).strip()
    # Getting name with proper formatting
    hero_name = db_api.get_hero_name(response_info.hero_id)
    reply = "[{}]({}) (sound warning: {}){}".format(
        original_text, response_info.link, hero_name, config.COMMENT_ENDING)
    bot_comment.edit(reply)
    logger.info("Updated Reply: " + replyable.fullname)
def check_duration(self, step, duration):
    try:
        if duration > step.deadline:
            logger.info('Sync step %s missed deadline, took %.2f seconds' % (step.name, duration))
    except AttributeError:
        # step doesn't have a deadline
        pass
def login_user(userid, password):
    logger.info("User Login")
    user_data = User.query.filter_by(userid=userid).first()
    if user_data is not None:
        if not user_data.check_password(password):
            logger.error("Authentication error: Wrong userid or password")
            return {
                'message': 'Authentication error: Wrong userid or password',
                "authenticated": False
            }, 401
        # Set JWT_ACCESS_TOKEN_EXPIRES to change token duration.
        access_token = create_access_token(identity=user_data)
        refresh_token = create_refresh_token(identity=user_data)
        logger.info("Access token created")
        logger.debug(f'access_token: {access_token}')
        resp = {
            'login': True,
            'msg': 'New login',
            'access_token': access_token,
            # 'refresh_token': refresh_token
        }
        _set_cookies(access_token, refresh_token)
        return resp, 200
    else:
        logger.error("User Does Not Exist")
        return {'message': 'User Does Not Exist', "authenticated": False}, 401
def register_execution(self, seconds_from_now: str, start_at: str):
    start_time = compute_start_time(seconds_from_now, start_at)
    start_time_ms = start_time * 1000
    for ID in self.active_workers_ids():
        url = f"{self._workers[ID]['baseURL']}/worker/execution?start_time={start_time_ms}"
        try:
            rs = requests.post(url)
            logger.info(f"POST {url} -> {rs.status_code}")
        except RequestException as e:
            logger.error(e)
            self.stop()
            raise e
    self._sequence_scheduler.register_task(0, init_mixer)
    audio_file = self.sequence_file('ogg')
    self._sequence_scheduler.register_task(
        1, lambda: load_audio_file(audio_file))
    register_countdown(
        lambda delay_seconds, fun: self._sequence_scheduler.register_task(
            start_time_ms - PLAYBACK_LATENCY + (delay_seconds * 1000), fun))
    self._sequence_scheduler.register_task(
        start_time_ms - PLAYBACK_LATENCY, play_music)
    super().register_tasks(start_time_ms)
    self._active_sequence['start_time'] = start_time
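# compute_start_time() is not shown; a minimal sketch under the assumption that it
# returns an epoch timestamp in seconds, built from either a relative offset or an
# absolute ISO-8601 string (both parameter interpretations are assumptions):
import time
from datetime import datetime

def compute_start_time(seconds_from_now: str, start_at: str) -> float:
    if start_at:
        return datetime.fromisoformat(start_at).timestamp()
    return time.time() + float(seconds_from_now)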
def batch_consumers(fi_id, generator, q, batch_limit=PER_THREAD_BATCH_LIMIT):
    logger.info("Starting API batching for FI {}".format(fi_id))
    # iterate over each consumer
    num_batches = 0
    for batch in generator:
        api_batch = []
        num_batches += 1
        # TODO: Modify generator SQL to name fields instead of iterating over them all
        for row in batch:
            api_batch.append(map_row_to_marketingcloud_api(row, fi_id))
        # send to MC when we have gathered enough for batch limit
        while True:
            try:
                q.put({
                    "batch_num": num_batches,
                    "fi_id": fi_id,
                    "data": api_batch
                }, False)
                break
            except Full:
                sleep(1)
            except Exception:
                logger.exception('Failed to put data on the worker queue')
                break  # avoid retrying forever on unexpected errors
        logger.debug("Queuing batch {} for FI {} from thread {}".format(
            num_batches, fi_id, threading.current_thread().name))
    logger.info(f"Completed delivery of marketing cloud messages for FI {fi_id}, "
                f"{num_batches} were delivered")
def set_position():
    logger.info('Update absolute camera position')
    try:
        data = request.get_json()
        x = data.get('x')
        y = data.get('y')
        z = data.get('z')
        logger.info('newpos: %s', {"x": x, "y": y, "z": z})
        resp = requests.get(
            f'http://{DEVICE_IP}/isp/appispmu.cgi?btOK=submit&i_mt_dirx={x}&i_mt_diry={y}&i_mt_dirz={z}',
            auth=HTTPDigestAuth(DEVICE_ID, DEVICE_PW))
        # logger.info(resp.text)
        if resp.status_code == 200:
            return {
                'message': 'Successfully updated camera position.',
                'result': {"x": x, "y": y, "z": z}
            }, 200
        else:
            raise CGIException(resp)
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def run(self):
    threads = []
    for name, producer_func in self.producers.items():
        self.event_manager.add_event(name)
        thread = threading.Thread(name=name, target=self.__producer,
                                  args=(producer_func, ))
        threads.append(thread)
        thread.start()
    for num in range(self.num_consumers):
        thread = threading.Thread(name="consumer_{}".format(num + 1),
                                  target=self.__consumer,
                                  args=(self.consumer_func, ))
        threads.append(thread)
        thread.start()
    for t in threads:
        t.join()
    self.queue.join()
    logger.info(
        f"completed work, {len(self.thread_exception_list)} exceptions were encountered. "
        f"{self.thread_exception_list if len(self.thread_exception_list) > 0 else ''}"
    )
def _create_model(self, input_shape, num_classes):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
                     input_shape=input_shape, name="conv1_1"))
    model = self.add_conv(model, 1, block_num=1, filter_num=64, kernel_size=(3, 3))
    model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
    model = self.add_conv(model, 2, block_num=2, filter_num=128, kernel_size=(3, 3))
    model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
    model = self.add_conv(model, 3, block_num=3, filter_num=256, kernel_size=(3, 3))
    model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
    model = self.add_conv(model, 3, block_num=4, filter_num=512, kernel_size=(3, 3))
    model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
    # model = self.add_conv(model, 3, block_num=5, filter_num=512, kernel_size=(3, 3))
    # model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
    model.add(Flatten())
    # model.add(Dense(4096, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    # model.summary() prints to stdout and returns None, so route it through the logger
    model.summary(print_fn=logger.info)
    return model
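# add_conv() is defined elsewhere in the class; a plausible sketch inferred from the
# call sites above (the real code presumably also assigns block-based layer names
# such as conv2_1, omitted here to keep the sketch minimal):
def add_conv(self, model, num, block_num, filter_num, kernel_size):
    for _ in range(num):
        model.add(Conv2D(filter_num, kernel_size=kernel_size,
                         activation='relu', padding='same'))
    return model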
def processMongoData(collection=None, ids=None, date=None):
    '''
    Process mongo data in multiple threads.
    :param collection: the collection
    :param ids: the corresponding ids
    :return: none
    '''
    # key0: mall id; key2 (inner dict key): neg (negative content), pos (positive content),
    # type (mall/shop)
    commentMap = getMongoData(collection=collection, ids=ids, date=date)  # fetch comment data from mongodb
    if len(commentMap) == 0:
        logger.info('no data fetched, param:{0},{1}'.format(collection, ids))
    else:
        for key, value in commentMap.items():
            for commentType in ['pos', 'neg']:  # process positive and negative comments
                if value is None or value.get(commentType) is None:
                    continue
                result = getAnalyseData(value[commentType])
                if result is None:
                    continue
                # Compare the cached top 50 with the current result: if a word already
                # appears in the cache, add the occurrence counts together; otherwise
                # cache the current value. Sort all words by frequency, cache the top 50,
                # and save the top 50 to the database.
                result = MergeHistroyFromES(key, value["type"], const.NEGATIVE_COMMENT, result)
                if result is not None:
                    saveDataToDb(key, value["type"], commentType, result)
                result = None
    commentMap = None  # clear all references to help avoid a memory leak
def _evaluate(self, step=None, record=False, idx=None):
    """Run one rollout if in eval mode.
    Run num_record_samples rollouts if in train mode.
    """
    vids = []
    for i in range(self._config.num_record_samples):
        rollout, info, frames = self._runner.run_episode(is_train=False, record=record)
        if record:
            ep_rew = info["rew"]
            ep_success = "s" if info["episode_success"] else "f"
            fname = "{}_step_{:011d}_{}_r_{}_{}.mp4".format(
                self._config.env,
                step,
                idx if idx is not None else i,
                ep_rew,
                ep_success,
            )
            self._save_video(fname, frames)
            vids.append(frames)
        if idx is not None:
            break
    logger.info("rollout: %s", {k: v for k, v in info.items() if "qpos" not in k})
    return rollout, info, np.array(vids)
def wrapper(*args, **kwargs):
    start_time = time.time()
    get_str = func(*args, **kwargs)
    end_time = time.time()
    logger.info(
        str(func) + " cost_time: {}".format(end_time - start_time))
    return get_str
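# wrapper above is presumably the body of a timing decorator; a minimal sketch of
# the enclosing decorator (the name `timeit` is hypothetical):
import functools
import time

def timeit(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        logger.info(str(func) + " cost_time: {}".format(time.time() - start_time))
        return result
    return wrapper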
def get_twitter_data(client, node: str, field: dict):
    for k, v in dict(field).items():
        if not v:
            field.pop(k)
    encoded_field = parse.urlencode(field)
    url = base + node + '?' + encoded_field
    logger.info(f'request to "{url}"')
    logger.debug(
        f'request info : <base : "{base}"> <node: {node}> <encoded_field: {field}>'
    )
    response, data = client.request(url)  # type : dict, bytes
    logger.debug(f'response status : {response["status"]}')
    try:
        if response['status'] == '200':
            return json.loads(data.decode('utf-8'))
    except Exception as e:
        logger.error('error while decoding response: %s', e)
    logger.error('no valid response (status %s) from "%s"', response['status'], url)
    return None
def instantiate(event):
    # Try to parse the event into json.
    try:
        event = json.loads(event.decode())
    except ValueError:
        return ActionError(
            EventError.instantiate("{}: is not valid json".format(event)))
    # Ensure the event has a type and payload.
    if any(key not in event for key in ("eventType", "payload")):
        return ActionError(
            EventError.instantiate(
                "Event does not contain an event type or payload"))
    logger.info("New message: {}".format(event["eventType"]))
    # Instantiate the event.
    return {
        EventType.REGISTER_TEST_STRATEGY.value:
            lambda: ActionRegisterTestStrategy(event["payload"]),
        EventType.LIMIT_ORDER.value:
            lambda: ActionLimitOrder(event["payload"]),
        EventType.TICK.value:
            lambda: ActionTick(),
        EventType.END_OF_CHART_DATA.value:
            lambda: ActionEndOfChartData(event["payload"]),
        EventType.NEW_CHART_DATA.value:
            lambda: ActionNewChartData(event["payload"]),
        EventType.ERROR.value:
            lambda: ActionError(event["payload"]),
        EventType.OK.value:
            lambda: ActionOk(),
    }[event["eventType"]]().action_type
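# A hypothetical invocation, assuming EventType.TICK.value == "tick"; an unknown
# eventType would raise KeyError, as in the dispatch table above:
action_type = instantiate(b'{"eventType": "tick", "payload": {}}')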
def update_device(**kwargs):
    logger.info('Update existing device')
    now = datetime.utcnow()
    try:
        query = db.session.query(Device).filter_by(
            serial=kwargs.get('serial')).one()
        if kwargs.get('newserial'):
            query.serial = kwargs.get('newserial')
        if kwargs.get('model'):
            query.model = kwargs.get('model')
        if kwargs.get('company'):
            query.company = db.session.query(Company).filter_by(
                name=kwargs.get('company')).one()
        if kwargs.get('ip'):
            query.ip = kwargs.get('ip')
        if kwargs.get('owner'):
            query.owner = db.session.query(User).filter_by(
                userid=kwargs.get('owner')).one()
        query.edited = now
        query.edited_by = current_user
        db.session.commit()
        return {'message': f'Updated device<{query.serial}> from db.'}, 200
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def task_entrance(self, task):
    try:
        with gevent.Timeout(self.workload.timeout):
            self.__func(task)
    except gevent.Timeout:
        self.workload.complete_workload(task, '52', 'NULL')
        logger.info('>>>>>>>>>>>>>> task timeout! ' + str(task))
def _train_model(self):
    logger.info('Training -----------')
    for step in range(5000):
        cost = self.model.train_on_batch(self.train_data_x, self.train_data_y)
        if step % 50 == 0:
            logger.info("After %d trainings, the cost: %f" % (step, cost))
def create_device(**kwargs):
    logger.info('Register new device')
    now = datetime.utcnow()
    try:
        query = Device.query.filter_by(serial=kwargs.get('serial')).first()
        if query is not None:
            logger.error("Device already exists")
            return {'message': 'Device already exists'}, 409
        company = db.session.query(Company).filter_by(
            name=kwargs.get('company')).one()
        owner = db.session.query(User).filter_by(
            userid=kwargs.get('owner')).one()
        device = Device(model=kwargs.get('model'),
                        serial=kwargs.get('serial'),
                        company=company,
                        owner=owner,
                        ip=kwargs.get('ip'),
                        created=now,
                        created_by=kwargs.get('created_by', current_user),
                        last_edited=now,
                        edited_by=kwargs.get('edited_by', current_user),
                        is_deleted=False)
        db.session.add(device)
        db.session.commit()
        return {
            'message': f'Posted device<{kwargs.get("serial")}> to db.'
        }, 201
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def openPage(url, query=dict(), headers=dict()) -> responsedString:
    import socket
    global opener
    if len(query) == 0:
        query = None
    else:
        # urlopen requires bytes for POST data in Python 3
        query = urlencode(query).encode()
    req = request.Request(url, query, headers, origin_req_host=None)
    res = None
    count = 0
    while count < 3:
        try:
            res = opener.open(req)
        except error.URLError:
            count = count + 1
            logger.info("-* failed connect to : {}. retrying {} / 3".format(url, count))
            continue
        except socket.error:
            count = count + 1
            continue
        except:
            raise ConnectionError
        else:
            break
    if not res:
        raise ConnectionError
    ret = responsedString(res)
    logger.debug("page opened. returned {} length. Info : {}".format(len(ret), res.info()))
    return ret
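# A hypothetical call (URL, query and header values are placeholders); note that a
# non-empty query turns the request into a POST, as in the original:
page = openPage('http://example.com/search', query={'q': 'test'},
                headers={'User-Agent': 'Mozilla/5.0'})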
def on_capture_success(task_id):
    logger.info("Callback triggered: Adding image record of capture.")
    try:
        res = celery_app.AsyncResult(task_id)
        data = res.get()
        res.forget()
        logger.info(f'data: {data}')
        image = Image(cell=data.get('cell'),
                      path=data.get('path'),
                      device=data.get('device'),
                      created=data.get('created'),
                      created_by=data.get('created_by'),
                      label=data.get('label'),
                      offset_x=data.get('offset_x'),
                      offset_y=data.get('offset_y'),
                      offset_z=data.get('offset_z'),
                      pos_x=data.get('pos_x'),
                      pos_y=data.get('pos_y'),
                      pos_z=data.get('pos_z'))
        db.session.add(image)
        db.session.commit()
        return jsonify('Successfully registered image record'), 200
    except Exception as e:
        logger.error(e)
        return jsonify('Failed to register image record'), 500
def load_audio_file(filename: str):
    global sound
    sound = pygame.mixer.Sound(filename)
    sound.set_volume(1.0)
    logger.info(
        f"▶ audio file loaded [{os.stat(filename).st_size}]: '{filename}'")
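# init_mixer and play_music (scheduled in register_execution above) are not shown;
# minimal sketches against pygame's documented API:
import pygame

def init_mixer():
    pygame.mixer.init()  # default frequency / size / channels
    logger.info("mixer initialised")

def play_music():
    sound.play()  # `sound` is the module-level Sound set by load_audio_file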
def create_user(**kwargs):
    logger.info("User registration")
    now = datetime.utcnow()
    try:
        query = User.query.filter_by(userid=kwargs.get('userid')).first()
        if query is not None:
            logger.error("Userid already exists")
            return {'message': 'userid already exists'}, 409
        comp_name = kwargs.get('company')
        if comp_name is None:
            return {'message': 'Company name is empty.'}, 400
        kwargs.update({
            'company': Company.query.filter_by(name=comp_name).one(),
            'created': now,
            'last_edited': now,
            'is_deleted': False
        })
        user = User(**kwargs)
        user.hash_password()
        db.session.add(user)
        db.session.commit()
        logger.info('User registration successful')
        return {'message': 'User registration successful'}, 201
    except NoResultFound:
        return {'message': 'Company doesn\'t exist.'}, 400
    except Exception:
        logger.error(traceback.format_exc())
        raise
def get_post_page(shortcode):
    node_path = 'p/'
    field = {
        '__a': 1,
    }
    logger.info(f'get {shortcode} post')
    return get_instagram_data(node_path, shortcode, field)
def _save_model1(self, params, save_path):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    keras.backend.set_session(tf.Session(config=config))
    params = param_preprocess.param_prepare(params, 'save')
    keras.backend.clear_session()
    anchors = np.array(params['model']['anchors']).reshape(-1, 2)
    num_anchors = 9
    num_classes = len(params['model']['class_names'])
    input_image = keras.layers.Input(shape=(None, None, 3), name='input_image')
    input_image_meta = keras.layers.Input(shape=(2,), name='input_image_meta')
    yolo_model = yolo_body(input_image, num_anchors // 3, num_classes)
    logger.info('load weights from {}'.format(params['evaluate']['weights_path']))
    yolo_model.load_weights(params['evaluate']['weights_path'])
    boxes, box_scores = keras.layers.Lambda(
        my_yolo_eval, output_shape=None, name='yolo_eval1',
        arguments={'anchors': anchors, 'num_classes': num_classes})(
            [yolo_model.output[0], yolo_model.output[1],
             yolo_model.output[2], input_image_meta])
    boxes = keras.layers.Lambda(lambda x: identity(x, name="boxes"))(boxes)
    box_scores = keras.layers.Lambda(lambda x: identity(x, name="box_scores"))(box_scores)
    model = keras.models.Model([input_image, input_image_meta], [box_scores, boxes])
    h52pb(model, save_path)
    logger.info('save pb model in {}'.format(save_path))
def load_sync_steps(self):
    dep_path = Config().observer_dependency_graph
    logger.info('Loading model dependency graph from %s' % dep_path)
    try:
        # This contains dependencies between records, not sync steps
        self.model_dependency_graph = json.loads(open(dep_path).read())
    except Exception:
        raise
def run(self):
    logger.debug("Running %s" % self)
    while not self.stop_event.is_set():
        StateThread.pause_event.wait()
        if callable(self.fxn):
            self.fxn()
    self.stop_event.clear()
    logger.info("Exiting %s" % self)
def check_schedule(self, step):
    time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
    try:
        if time_since_last_run < step.requested_interval:
            raise StepNotReady
    except AttributeError:
        logger.info('Step %s does not have requested_interval set' % step.__name__)
        raise StepNotReady
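# StepNotReady and the run-time bookkeeping used above are not shown; a minimal
# sketch consistent with check_schedule and the update_run_time calls elsewhere:
class StepNotReady(Exception):
    pass

def update_run_time(self, step):
    self.last_run_times[step.__name__] = time.time()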
def __init__(self, workers, thread_name, func):
    '''Worker thread that repeatedly invokes func until stopped.'''
    self.__workers = workers
    self.__busy = False
    self.thread_name = thread_name
    self.__func = func
    threading.Thread.__init__(self, name=self.thread_name)
    logger.info("%s init complete" % self.thread_name)
def run(self):
    '''Invoke func repeatedly until the busy flag is cleared.'''
    self.__busy = True
    while self.__busy:
        self.__func()
    self.__busy = False
    logger.info("%s stop" % self.thread_name)
def register_in_master(self):
    '''Register with the master and obtain the id assigned by the master.'''
    data = {"name": self.info.name,
            "server": self.info.server,
            "path": self.info.path,
            "server_ip": self.info.server_ip,
            "type": self.info.type,
            'recv_real_time_request': self.info.recv_real_time_request}
    path = "/register_slave?" + urllib.parse.urlencode(data)
    slave_id = self.__client.get(path).strip()
    slave_id = slave_id.strip('\0')
    if len(slave_id) == 0 or not slave_id.isdigit():
        return False
    self.info.id = int(slave_id)
    logger.info("slave register id is : %d" % self.info.id)
    return True
def load_sync_step_modules(self, step_dir=None):
    if step_dir is None:
        if hasattr(Config(), "observer_steps_dir"):
            step_dir = Config().observer_steps_dir
        else:
            step_dir = "/opt/planetstack/observer/steps"
    for fn in os.listdir(step_dir):
        pathname = os.path.join(step_dir, fn)
        if os.path.isfile(pathname) and fn.endswith(".py") and fn != "__init__.py":
            module = imp.load_source(fn[:-3], pathname)
            for classname in dir(module):
                c = getattr(module, classname, None)
                # make sure 'c' is a descendent of SyncStep and has a
                # provides field (this eliminates the abstract base classes
                # since they don't have a provides)
                if (inspect.isclass(c) and issubclass(c, SyncStep)
                        and hasattr(c, "provides") and c not in self.sync_steps):
                    self.sync_steps.append(c)
    logger.info('loaded sync steps: %s' % ",".join(x.__name__ for x in self.sync_steps))
def wake_up(self):
    logger.info('Wake up routine called. Event cond %r' % self.event_cond)
    with self.event_cond:
        self.event_cond.notify()
#!/usr/bin/env python
__author__ = "henry"
__date__ = "$Mar 25, 2011 3:35:38 PM$"

from devices.factory import factory
from util.logger import logger
import config


def main():
    pass


if __name__ == "__main__":
    logger.info("Starting Elixys Server")
        SELECT players.id, players.name, v_numWins.wins,
               v_numMatches.matchesPlayed AS matches
        FROM players
        LEFT JOIN v_numWins ON (players.id = v_numWins.id)
        JOIN v_numMatches ON (players.id = v_numMatches.id)
        ORDER BY wins DESC;
    """)
    conn.commit()
    conn.close()


if __name__ == '__main__':
    # start logging
    logger.info('Started')
    # create the tournament DB
    create_db()
    logger.info('Created DB')
    # create tables and views
    create_tables()
    logger.info('Created tables')
    create_indices()
    logger.info('Created indices')
    create_views()
    logger.info('Created views')
    # Register players: 'Player 1' .. 'Player 64'
    PLAYERS = ['Player {}'.format(i) for i in range(1, 65)]
def run(self):
    if not self.driver.enabled:
        return
    if (self.driver_kind == "openstack") and (not self.driver.has_openstack):
        return

    while True:
        try:
            logger.info('Waiting for event')
            tBeforeWait = time.time()
            self.wait_for_event(timeout=30)
            logger.info('Observer woke up')

            # Set of whole steps that failed
            failed_steps = []

            # Set of individual objects within steps that failed
            failed_step_objects = set()

            for S in self.ordered_steps:
                step = self.step_lookup[S]
                start_time = time.time()

                sync_step = step(driver=self.driver)
                sync_step.__name__ = step.__name__
                sync_step.dependencies = []
                try:
                    mlist = sync_step.provides
                    for m in mlist:
                        sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
                except KeyError:
                    pass
                sync_step.debug_mode = debug_mode

                should_run = False
                try:
                    # Various checks that decide whether
                    # this step runs or not
                    self.check_class_dependency(sync_step, failed_steps)  # don't run Slices if Sites failed
                    self.check_schedule(sync_step)  # don't run sync_network_routes if time since last run < 1 hour
                    should_run = True
                except StepNotReady:
                    logger.info('Step not ready: %s' % sync_step.__name__)
                    failed_steps.append(sync_step)
                except:
                    failed_steps.append(sync_step)

                if should_run:
                    try:
                        logger.info('Executing step %s' % sync_step.__name__)
                        # ********* This is the actual sync step
                        failed_objects = sync_step(failed=list(failed_step_objects))
                        # measure the step only after it has actually run
                        duration = time.time() - start_time
                        self.check_duration(sync_step, duration)
                        if failed_objects:
                            failed_step_objects.update(failed_objects)
                        self.update_run_time(sync_step)
                    except:
                        failed_steps.append(sync_step)
            self.save_run_times()
        except:
            logger.log_exc("Exception in observer run loop")
            traceback.print_exc()
    except AttributeError as e:
        raise e


def update_dep(d, o):
    try:
        print('Trying to update %s' % d)
        save_fields = []
        if d.updated < o.updated:
            save_fields = ['updated']
        if save_fields:
            d.save(update_fields=save_fields)
    except AttributeError as e:
        raise e
    except Exception as e:
        logger.info('Could not save %r. Exception: %r' % (d, e))


def delete_if_inactive(d, o):
    try:
        d.delete()
        print("Deleted %s (%s)" % (d, d.__class__.__name__))
    except:
        pass
    return


@atomic
def execute_model_policy(instance, deleted):
    # Automatic dirtying
    if instance in bad_instances:
        return