def Main(groupname, config_name, agg_config_name, previous_time, current_time):
    """Run one aggregation pass for *groupname* over [previous_time, current_time].

    Reads per-host intermediate results from distributed storage, groups them
    by datacenter sub-group, feeds each aggregator's collected values through
    aggregate_group(), and ships the per-aggregator results to every
    configured result handler.

    Returns the string 'failed' on storage init/connect errors, otherwise
    "Success".
    """
    # Short run id derived from all call parameters — used only to tag log lines.
    uuid = hashlib.md5("%s%s%s%i%i" % (groupname, config_name, agg_config_name,
                                       previous_time, current_time)).hexdigest()[:10]
    logger = AggregateLogger(uuid)
    logger.info("Start aggregation: %s %s %s %i-%i" % (groupname, config_name,
                                                       agg_config_name,
                                                       previous_time, current_time))
    conf = ParsingConfigurator(config_name, agg_config_name)

    ds = DistributedStorageFactory(**conf.ds)  # Get Distributed storage
    if ds is None:
        logger.error('Failed to init distributed storage like MongoRS')
        return 'failed'
    if not ds.connect('combaine_mid/%s' % config_name):
        logger.error('Cannot connect to distributed storage like MongoRS')
        return 'failed'

    # Factories may return None for broken configs — keep only live handlers.
    # NOTE(review): 'resulthadlers' is the attribute name ParsingConfigurator
    # actually exposes (typo lives in the configurator, not here).
    res_handlers = [item
                    for item in (ResultHandlerFactory(**_cfg)
                                 for _cfg in conf.resulthadlers)
                    if item is not None]
    aggs = dict((_agg.name, _agg)
                for _agg in (AggregatorFactory(**agg_config)
                             for agg_config in conf.aggregators))

    # hosts: mapping of datacenter -> list of hostnames (per split_hosts_by_dc).
    hosts = split_hosts_by_dc(groupname)
    all_data = []
    for subgroup in hosts.values():
        data_by_subgrp = collections.defaultdict(list)
        for hst in subgroup:
            # Storage keys use '_' where hostnames have '-' or '.'.
            host_key = hst.replace('-', '_').replace('.', '_')
            for agg_name in aggs:
                val = ds.read("%s;%s;%i;%i;%s" % (host_key, config_name,
                                                  previous_time, current_time,
                                                  agg_name))
                # was: side-effect list comprehension — plain loop is the idiom
                data_by_subgrp[agg_name].append(val)
        all_data.append(dict(data_by_subgrp))

    res = []
    for key in aggs:  # was aggs.iterkeys() — Python-2-only
        # One list of stored values per sub-group that has data for this aggregator.
        values = [_item[key] for _item in all_data if key in _item]  # was has_key()
        one_agg_result = AggRes(aggs[key].name, hosts.keys(),
                                conf.metahost or groupname, agg_config_name)
        # aggregate_group() is a generator; its first yield is the final value.
        one_agg_result.store_result(next(aggs[key].aggregate_group(values)))
        res.append(one_agg_result)

    logger.info("Handling data by result handlers")  # typo fix: was "Hadling"
    logger.info("Result handlers: %s" % res_handlers)  # was a bare debug print
    # Best-effort dispatch: a failing handler must not abort storage cleanup below.
    try:
        for _res_handler in res_handlers:
            _res_handler.send(res)
    except Exception as err:
        logger.exception(err)

    ds.close()
    logger.info("Aggregation has finished successfully")
    return "Success"