Code example #1
    def random_selection(self, n_samples):                        
        g = self.df_org.groupby(self.target_name)[self.id_name]
        # selected = []
        # selected.append(np.random.choice(g.get_group(0).tolist(), n_samples, replace=False))
        # selected.append(np.random.choice(g.get_group(1).tolist(), n_samples, replace=False))
        selected = [np.random.choice(g.get_group(i).tolist(), n_samples, replace=False) for i in range(N_CLASSES)]
        selected = np.concatenate(selected, axis=0)

        df_new = pd.DataFrame({self.id_name: selected})
        df_new = df_new.merge(self.df_org, on=self.id_name, how='left')
        get_logger().info('num of selected_images: %d' % len(df_new))

        return df_new
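For reference, a self-contained sketch of the same per-class sampling; the toy frame, column names, and N_CLASSES = 2 below are illustrative assumptions, not values from the original project:

import numpy as np
import pandas as pd

N_CLASSES = 2
df_org = pd.DataFrame({'id': range(10), 'label': [0, 1] * 5})
g = df_org.groupby('label')['id']
# draw n_samples ids per class without replacement, then restore the full rows
selected = np.concatenate([
    np.random.choice(g.get_group(i).tolist(), 2, replace=False)
    for i in range(N_CLASSES)])
df_new = pd.DataFrame({'id': selected}).merge(df_org, on='id', how='left')
print(len(df_new))  # 4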
Code example #2
class TestUserRegister(unittest.TestCase):

    case_dir = contants.case_dir
    do_excel = DoExcel(case_dir, 'userRegister')
    cases = do_excel.get_data()
    logger = get_logger("register")

    @classmethod
    def setUpClass(cls):
        # create a database object that returns rows as dicts
        cls.mysql = MysqlUtil(return_dict=True)
        cls.uid_list = []

    def setUp(self):
        pass

    @data(*cases)
    def test_user_register(self, case):
        self.logger.info("Start running case {1} of module {0}: {2}".format(case.module, case.case_id, case.title))
        if json.loads(case.data)["ip"] == "${ip}":
            get_ip()
        if json.loads(case.data)["mobile"] == "${mobile}":
            check_mobile()
            if json.loads(case.data)["verify_code"] == "${verify_code}":
                get_code(getattr(context.Context, 'mobile'))  # send the verification code and store it as an attribute on the Context class
Code example #3
    def __init__(self,
                 urls,
                 sem_size,
                 stats,
                 headers=None,
                 char_mode=0,
                 is_proxy=False,
                 if_report=False):
        self.urls = urls
        self.char_mode = char_mode
        self.stats = stats
        self.is_proxy = is_proxy
        self.if_report = if_report

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.semaphore = asyncio.Semaphore(sem_size)

        if headers is not None:
            self.headers = headers

        if self.is_proxy:
            self.proxy = 'http://127.0.0.1:8080'
        else:
            self.proxy = None

        self.logger = get_logger()
Code example #4
class TestVerifiedUserAuth(unittest.TestCase):

    case_dir = contants.case_dir
    do_excel = DoExcel(case_dir, 'verifyUserAuth')
    cases = do_excel.get_data()
    logger = get_logger("auth")

    @classmethod
    def setUpClass(cls):
        # create a database object that returns rows as dicts
        cls.mysql = MysqlUtil(return_dict=True)
        try:
            with open(contants.uid_list_dir, 'r') as file:
                cls.uid_list = file.readlines()
        except FileNotFoundError as e:
            cls.logger.error("The file does not exist!!")

    def setUp(self):
        pass

    @data(*cases)
    def test_verified_user_auth(self, case):
        self.logger.info("Start running case {1} of module {0}: {2}".format(case.module, case.case_id, case.title))
        if json.loads(case.data)["uid"] == "${uid}":
            global count
            self.uid = str(self.uid_list[count])[:-1]  # strip the trailing newline

            count += 1
            setattr(context.Context, 'uid', self.uid)
            self.logger.info("***********",getattr(context.Context, 'uid'))
        if json.loads(case.data)["cre_id"] == "${cre_id}":
            get_cre_id()
Code example #5
File: mlp_worker.py Project: link-kut/or
    def __init__(self, global_net, optimizer, global_episode,
                 global_episode_reward, message_queue, idx, project_home):
        super(Worker, self).__init__()
        self.name = 'worker-{0}'.format(idx)

        self.optimizer = optimizer
        self.global_net = global_net
        self.global_episode = global_episode
        self.global_episode_reward = global_episode_reward
        self.message_queue = message_queue

        self.local_model = MLP_Model(chev_conv_state_dim=5,
                                     action_dim=config.SUBSTRATE_NODES)

        logger_a3c_gcn_train = get_logger("a3c_gcn_train", project_home)

        self.env = A3C_GCN_TRAIN_VNEEnvironment(logger_a3c_gcn_train)
        self.agent = A3C_GCN_VNEAgent(
            self.local_model,
            beta=0.3,
            logger=logger_a3c_gcn_train,
            time_window_size=config.TIME_WINDOW_SIZE,
            agent_type=config.ALGORITHMS.BASELINE,
            type_of_virtual_node_ranking=config.TYPE_OF_VIRTUAL_NODE_RANKING.TYPE_2,
            allow_embedding_to_same_substrate_node=config.ALLOW_EMBEDDING_TO_SAME_SUBSTRATE_NODE,
            max_embedding_path_length=config.MAX_EMBEDDING_PATH_LENGTH)

        self.critic_loss = 0.0
        self.actor_objective = 0.0
Code example #6
File: bin_packing.py Project: ytkn/gurobi_book
def make_problem(bin_capacity: int, items: List[int]) -> pulp.LpProblem:
    log = logger.get_logger(__name__)
    n_bins = bins_by_greedy(bin_capacity, items)
    log.debug(f"greedy:{n_bins}")
    n_items = len(items)
    problem = pulp.LpProblem(name="bin_packing", sense=pulp.LpMinimize)
    x = {(i, j): pulp.LpVariable(name='x_{}_{}'.format(i, j),
                                 cat=pulp.LpBinary)
         for i, j in itertools.product(range(n_bins), range(n_items))}

    t = {
        i: pulp.LpVariable(name='t_{}'.format(i), cat=pulp.LpBinary)
        for i in range(n_bins)
    }

    for i in range(n_bins):
        problem.addConstraint(
            pulp.lpSum([x[i, j] * items[j]
                        for j in range(n_items)]) <= bin_capacity,
            name=f"capcacity_{i}")

    for j in range(n_items):
        # the >= / <= pair assigns each item to exactly one bin
        problem.addConstraint(
            pulp.lpSum([x[i, j] for i in range(n_bins)]) >= 1)
        problem.addConstraint(
            pulp.lpSum([x[i, j] for i in range(n_bins)]) <= 1)

    for i, j in itertools.product(range(n_bins), range(n_items)):
        problem.addConstraint(t[i] >= x[i, j])  # bin i counts as used once any item is placed in it

    problem.objective += pulp.lpSum([t[i] for i in range(n_bins)])
    return problem
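A standalone toy run of the same model; the instance and the crude bound n_bins = len(items) (standing in for bins_by_greedy) are assumptions to keep the sketch self-contained:

import itertools
import pulp

bin_capacity, items = 10, [5, 5, 6, 4]
n_bins = n_items = len(items)
prob = pulp.LpProblem('bin_packing', pulp.LpMinimize)
x = {(i, j): pulp.LpVariable(f'x_{i}_{j}', cat=pulp.LpBinary)
     for i, j in itertools.product(range(n_bins), range(n_items))}
t = {i: pulp.LpVariable(f't_{i}', cat=pulp.LpBinary) for i in range(n_bins)}
prob += pulp.lpSum(t.values())                      # minimize bins used
for i in range(n_bins):
    prob += pulp.lpSum(x[i, j] * items[j] for j in range(n_items)) <= bin_capacity
for j in range(n_items):
    prob += pulp.lpSum(x[i, j] for i in range(n_bins)) == 1
for i, j in itertools.product(range(n_bins), range(n_items)):
    prob += t[i] >= x[i, j]
prob.solve(pulp.PULP_CBC_CMD(msg=False))
print(pulp.value(prob.objective))                   # 2.0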
Code example #7
    def setup(self, name, force_create=False, *args, **kwargs):
        self.table_name = name
        # no project id is needed if the credentials path is exported
        self.datastore_cli = datastore.Client()
        self.logger = get_logger(__name__)
        if force_create:
            self.destroy()
Code example #8
    def __init__(self, save_path='./report', report_name=None):
        self.save_path = save_path
        self.report_name = report_name

        if not os.path.isdir(self.save_path):
            os.mkdir(self.save_path)

        self.logger = get_logger()
Code example #9
def convert_epoch_to_utc_date(timestamp, date_format="%Y-%m-%d %H:%M:%S"):
    log = get_logger(__name__)
    try:
        date_str = datetime.utcfromtimestamp(timestamp).strftime(date_format)
    except Exception:
        log.error(f'Error in converting timestamp {timestamp}', exc_info=True)
        date_str = datetime.utcnow().strftime(date_format)
    return date_str
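Quick usage check of the helper above (assumes the snippet's imports, i.e. from datetime import datetime, and a get_logger that returns a stdlib logging.Logger):

print(convert_epoch_to_utc_date(0))             # 1970-01-01 00:00:00
print(convert_epoch_to_utc_date(0, "%Y%m%d"))   # 19700101
print(convert_epoch_to_utc_date(None))          # error path: falls back to the current UTC time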
Code example #10
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)
    # n = 50
    # x, y = make_points(n)
    n, x, y = read_hokkaido()
    graph = make_euclidean_graph(n, x, y)
    solution = make_problem_and_solve(graph, n)
    plot_graph(solution, x, y)
Code example #11
def learn(device,
          env, nenv, seed,
          number_timesteps,
          network, optimizer,
          save_path, save_interval, ob_scale,
          gamma, timesteps_per_batch):
    """
    Paper:
    Williams R J. Simple Statistical Gradient-Following Algorithms for
    Connectionist Reinforcement Learning[J]. Machine Learning, 1992: 229-256.

    Parameters:
    ----------
        gamma (float): reward discount factor
        timesteps_per_batch (int): number of timesteps sampled before each update

    """
    name = '{}_{}'.format(os.path.split(__file__)[-1][:-3], seed)
    logger = get_logger(name)

    policy = network.to(device)
    generator = _generate(device, env, policy, ob_scale,
                          number_timesteps // nenv, gamma, timesteps_per_batch)

    n_iter = 0
    total_timesteps = 0
    infos = {'eplenmean': deque(maxlen=100), 'eprewmean': deque(maxlen=100)}
    start_ts = time.time()
    while True:
        try:
            batch = next(generator)
        except StopIteration:
            break
        b_o, b_a, b_r, info = batch
        total_timesteps += b_o.size(0)
        for d in info:
            infos['eplenmean'].append(d['l'])
            infos['eprewmean'].append(d['r'])

        b_logits = policy(b_o)
        dist = torch.distributions.Categorical(logits=b_logits)
        b_logp = dist.log_prob(b_a)
        loss = -(b_logp * b_r).sum()  # likelihood ratio
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        n_iter += 1
        logger.info('{} Iter {} {}'.format('=' * 10, n_iter, '=' * 10))
        fps = int(total_timesteps / (time.time() - start_ts))
        logger.info('Total timesteps {} FPS {}'.format(total_timesteps, fps))
        for k, v in infos.items():
            v = (sum(v) / len(v)) if v else float('nan')
            logger.info('{}: {:.6f}'.format(k, v))
        if save_interval and n_iter % save_interval == 0:
            torch.save([policy.state_dict(), optimizer.state_dict()],
                       os.path.join(save_path, '{}.{}'.format(name, n_iter)))
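The policy-gradient step at the heart of the loop, isolated on toy tensors (shapes and values are illustrative assumptions):

import torch

logits = torch.randn(4, 3, requires_grad=True)    # 4 states, 3 discrete actions
actions = torch.tensor([0, 2, 1, 0])              # b_a
returns = torch.tensor([1.0, 0.5, -0.2, 0.8])     # b_r
dist = torch.distributions.Categorical(logits=logits)
loss = -(dist.log_prob(actions) * returns).sum()  # likelihood-ratio objective
loss.backward()
print(logits.grad.shape)                          # torch.Size([4, 3])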
Code example #12
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)
    n = 10
    p = 0.4
    edges = make_edges(n, p)
    problem = make_problem(n, edges)
    solve_with_log.exec(problem, False, 100)
    log.debug(f"by enumeration: {solution_by_enumeration(n, edges)}")
    log.debug(F"edges:{edges}")
Code example #13
File: lot_sizing.py Project: ytkn/gurobi_book
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)
    instance = make_simple_instance()
    stock, production, setup = make_variables(instance)
    problem = make_problem(instance, stock, production, setup)
    solve_with_log.exec(problem)
    for i in range(5):
        log.debug(
            f"{pulp.value(production[0, i])},{pulp.value(stock[0, i])},{pulp.value(setup[0, i])}"
        )
Code example #14
    def __init__(self):
        self.start_time = datetime.datetime.utcnow()
        cfgpath = sys.argv[1] if len(sys.argv) > 1 else ''
        self.root_dir = self.get_current_dir()
        self.config = Config().get_config(self.CONFIG_FILENAME, self.root_dir, cfgpath)
        self.log = get_logger(__name__, force_create=True, **self.config['Logging'])
        self.collection_config = self.config['Collection']
        self.api_config = self.config['GsuiteAlertCenter']
        op_cli = ProviderFactory.get_provider(self.config['Collection']['ENVIRONMENT'])
        self.kvstore = op_cli.get_storage("keyvalue", name=self.config['Collection']['DBNAME'])
        self.DEFAULT_START_TIME_EPOCH = get_current_timestamp() - self.collection_config['BACKFILL_DAYS'] * 24 * 60 * 60
        self.alertcli = self.get_alert_client()
Code example #15
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)
    n = 40
    x, y = make_points(n)
    graph = to_directed_graph(make_euclidean_graph(n, x, y))
    problem_loose = make_problem_by_loose_constraint(graph, n)
    problem_tight = make_problem_by_tight_constraint(graph, n)
    solution = solve(problem_loose, graph)
    plot_graph(solution, x, y)
    solution = solve(problem_tight, graph)
    plot_graph(solution, x, y)
Code example #16
File: helper_config.py Project: SimoWhatsup/pysurvey
    def __init__(self, name='MySurvey', species='HI', mosaic='skymap'):
        self.logger = get_logger()

        self.name = name
        self.species = species
        self.mosaic = mosaic

        self.constants_config = self.get_constants_config()

        self.survey_config = self.get_survey_config()
        self.mosaic_config = self.get_mosaic_config()
        self.spectral_config = self.get_spectral_config()
        self.spatial_config = self.get_spatial_config()
Code example #17
File: graph_coloring.py Project: ytkn/gurobi_book
def binary_seach(n: int, edges: List[Edge]) -> int:
    log = logger.get_logger(__name__)
    lb = 1
    rb = n
    while rb - lb > 1:
        c = int((lb + rb) / 2)
        problem = make_problem_for_feasibility(n, edges, c)
        problem.solve(pulp.PULP_CBC_CMD(msg=False))
        log.debug(f"c:{c}, status:{pulp.LpStatus[problem.status]}")
        if pulp.LpStatus[problem.status] == "Infeasible":
            lb = c
        else:
            rb = c
    return rb
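The bisection invariant in isolation (feasible below is a stand-in for solving make_problem_for_feasibility): lb stays infeasible (or 1), rb stays feasible, so rb converges to the minimum feasible color count:

def min_colors(n, feasible):
    lb, rb = 1, n                       # n colors are always feasible
    while rb - lb > 1:
        c = (lb + rb) // 2
        if feasible(c):
            rb = c
        else:
            lb = c
    return rb

print(min_colors(5, lambda c: c >= 3))  # 3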
Code example #18
File: test_api.py Project: tessyliu/test
class TestApi(unittest.TestCase):
    do_excel = DoExcel(contants.cases_dir)  # do_excel
    cases_login = do_excel.read_excel('login')
    cases_register = do_excel.read_excel('register')
    request_1 = RequestMethod()  # request
    mysql = MysqlUtil()  # database
    my_logger = logger.get_logger(
        logger_name="TestApi")  # the same logger_name can be shared across modules

    def setUp(self):
        pass

    # @unittest.skip("do not run")  # a decorated method will be skipped
    @data(*cases_login)  # login API tests
    def test_login(self, case):
        self.my_logger.info("Start running login case {}".format(case.case_id))
        resp = self.request_1.request_method(case.method, case.url, case.data)
        try:
            self.assertEqual(case.expectedresult, resp.text)
            self.do_excel.write_back(case.case_id + 1, resp.text, "Pass")
            # Question 1: why can the actual login result be written back to Excel,
            # while the Pass/Failed status cannot?
            self.my_logger.info("Login case {} result: Pass".format(case.case_id))
        except AssertionError as e:
            self.do_excel.write_back(case.case_id + 1, resp.text, "Failed")
            self.my_logger.error("Login case {} result: Failed".format(case.case_id))
            raise e

    def tearDown(self):
        pass

    @unittest.skip("skip the register cases")
    @data(*cases_register)  # register API tests
    def test_register(self, case):
        self.my_logger.info("Start running register case {}".format(case.case_id))
        sql = "select max(mobilephone) from future.member"
        max = self.mysql.fetch_one(sql)[
            0]  # the result is a tuple; fetched here so every case gets a fresh phone number
        data_dict = json.loads(case.data)  # case.data comes out of Excel as a string; convert to a dict
        if data_dict["mobilephone"] == "${register_mobile}":  # parameterize the phone number
            data_dict["mobilephone"] = int(max) + 1
            print(data_dict["mobilephone"])
        resp = self.request_1.request_method(case.method, case.url, data_dict)
        try:
            self.assertEqual(case.expectedresult, resp.text)
            self.do_excel.write_back(case.case_id + 1, resp.text, "Pass")
            self.my_logger.info("Register case {} result: Pass".format(case.case_id))
        except AssertionError as e:
            self.do_excel.write_back(case.case_id + 1, resp.text, "Failed")
            self.my_logger.error("Register case {} result: Failed".format(case.case_id))
            raise e
Code example #19
File: bin_packing.py Project: ytkn/gurobi_book
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)

    bin_capacity, items = read_from_text(0)
    # bin_capacity, items = make_instance()
    # initial_solution = make_solution_by_greedy(bin_capacity, items)
    initial_solution = make_simple_solution(bin_capacity, items)
    solutions_str = "\n".join([f"{row}" for row in initial_solution])
    log.debug(f"=========initial_solution=========\n{solutions_str}")
    problem = make_problem_with_initial_solution(bin_capacity, items,
                                                 initial_solution)
    # problem = make_problem(bin_capacity, items)
    solve_with_log.exec(problem, True, 300)
Code example #20
File: lot_sizing.py Project: ytkn/gurobi_book
def make_problem(instance: Instance, stock: pulp.LpVariable,
                 production: pulp.LpVariable, setup: pulp.LpVariable):
    log = logger.get_logger(__name__)
    log.debug(instance.time_limit)
    n_products = instance.n_products
    n_terms = instance.n_terms

    problem = pulp.LpProblem(name="lot_sizing", sense=pulp.LpMinimize)

    problem.objective += pulp.lpSum([
        instance.setup_cost[p][t] * setup[p, t] +
        instance.production_cost[p][t] * production[p, t] +
        instance.stock_cost[p][t] * stock[p, t]
        for p, t in itertools.product(range(n_products), range(n_terms))
    ])

    for p in range(n_products):
        # initial stock is pinned to zero by the >= / <= pair
        problem.addConstraint(stock[p, 0] >= 0)
        problem.addConstraint(stock[p, 0] <= 0)

    for p, t in itertools.product(range(n_products), range(n_terms)):
        problem.addConstraint(production[p, t] >= 0)

    for p, t in itertools.product(range(n_products), range(1, n_terms)):
        problem.addConstraint(stock[p, t] >= 0)

    for p, t in itertools.product(range(n_products), range(1, n_terms)):
        # flow balance: stock[p, t] = stock[p, t-1] + production[p, t-1] - demand[p][t-1]
        problem.addConstraint(stock[p, t] >= stock[p, t - 1] +
                              production[p, t - 1] - instance.demand[p][t - 1])
        problem.addConstraint(stock[p, t] <= stock[p, t - 1] +
                              production[p, t - 1] - instance.demand[p][t - 1])

    for p in range(n_products):
        problem.addConstraint(
            stock[p, n_terms - 1] + production[p, n_terms - 1] -
            instance.demand[p][n_terms - 1] >= 0)

    for t in range(n_terms):
        problem.addConstraint(
            pulp.lpSum([
                instance.setup_time[p][t] * setup[p, t] + production[p, t]
                for p in range(n_products)
            ]) <= instance.time_limit[t])

    for p, t in itertools.product(range(n_products), range(n_terms)):
        problem.addConstraint(
            production[p, t] <= setup[p, t] *
            (instance.time_limit[t] - instance.setup_time[p][t]))

    return problem
Code example #21
File: learner.py Project: jamlamberti/bogo_probe
    def __init__(self):
        """
        Initialize the classifier with whatever hyperparams you want
        """

        # I am currently throwing these logs into the log-dir
        # (specified in the config file)
        # It would probably be better to have test specific logging
        # also placed into the results dir

        logging_config = config.Section('logging')
        self.log = logger.get_logger(
            'learner',
            join(abspath(logging_config.get('log-dir')), 'learner.log'))
Code example #22
    def load_class(cls, full_class_string, invoking_module_name):
        """
            dynamically load a class from a string
        """

        #  using importlib https://docs.python.org/3/library/importlib.html find_spec not working in 2.7
        log = get_logger(__name__)
        try:
            module_path, class_name = cls._split_module_class_name(full_class_string, invoking_module_name)
            module = importlib.import_module(module_path)
            return getattr(module, class_name)
        except Exception as e:
            t, v, tb = sys.exc_info()
            log.error(f"Unable to import Module {full_class_string} Error: {e} Traceback: {tb}")
            raise
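The dynamic-import core of load_class in isolation; json/JSONDecoder below are stand-ins for an arbitrary module path and class name:

import importlib

module = importlib.import_module('json')
cls = getattr(module, 'JSONDecoder')
print(cls)  # <class 'json.decoder.JSONDecoder'>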
Code example #23
def make_problem_and_solve(G: networkx.Graph, n: int):
    log = logger.get_logger(__name__)
    problem = pulp.LpProblem(name="tsp", sense=pulp.LpMinimize)

    x = {(i, j): pulp.LpVariable(name='x_{}_{}'.format(i, j),
                                 cat=pulp.LpBinary)
         for (i, j) in G.edges}

    problem.objective += pulp.lpSum(
        [x[i, j] * G[i][j]['weight'] for (i, j) in G.edges])

    for i in range(n):
        problem.addConstraint(
            pulp.lpSum([x[j, i] for j in range(0, i)]) +
            pulp.lpSum([x[i, j] for j in range(i + 1, n)]) >= 2)
        problem.addConstraint(
            pulp.lpSum([x[j, i] for j in range(0, i)]) +
            pulp.lpSum([x[i, j] for j in range(i + 1, n)]) <= 2)

    solved = False
    while not solved:
        problem.solve(pulp.PULP_CBC_CMD(msg=False))
        g = networkx.Graph()
        for (i, j) in G.edges:
            if pulp.value(x[i, j]) == 1:
                g.add_edge(i, j)
        components = list(networkx.connected_components(g))
        log.info(f"components: {len(components)}")
        for c in components:
            log.info(c)
        if len(components) == 1:
            solved = True
        else:
            for component in components:
                problem.addConstraint(
                    pulp.lpSum([
                        x[min(i, j), max(i, j)]
                        for i, j in itertools.combinations(component, 2)
                    ]) <= len(component) - 1)

    solution = []
    log.info("solved")
    for (i, j) in G.edges:
        if pulp.value(x[i, j]) == 1:
            solution.append((i, j))

    return solution
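The subtour test driving the loop, in isolation: the chosen edges form a single tour only when they leave one connected component (the toy edge set is an assumption):

import networkx

g = networkx.Graph([(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3)])
print([sorted(c) for c in networkx.connected_components(g)])
# [[0, 1, 2], [3, 4, 5]] -> two subtours, so another elimination cut gets added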
Code example #24
def make_problem(n: int, edges: List[Edge]) -> pulp.LpProblem:
    log = logger.get_logger(__name__)
    problem = pulp.LpProblem(name="bin_packing", sense=pulp.LpMinimize)
    x = {
        i: pulp.LpVariable(name='x_{}'.format(i), cat=pulp.LpBinary)
        for i in range(n)
    }
    y = {(i, j): pulp.LpVariable(name='y_{}_{}'.format(i, j),
                                 cat=pulp.LpBinary)
         for i, j in edges}

    problem.objective += pulp.lpSum([y[i, j] for i, j in edges])

    problem.addConstraint(pulp.lpSum([x[i] for i in range(n)]) >= n / 2)
    problem.addConstraint(pulp.lpSum([x[i] for i in range(n)]) <= n / 2)
    for i, j in edges:
        problem.addConstraint(x[i] - x[j] <= y[i, j])
        problem.addConstraint(x[j] - x[i] <= y[i, j])
    return problem
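The same model on a concrete instance: the >= / <= pair emulates equality, and y[i, j] linearizes |x_i - x_j| so the objective counts edges across the cut (the 4-cycle instance is an assumption for illustration):

import pulp

n = 4
edges = [(0, 1), (1, 2), (2, 3), (0, 3)]          # a 4-cycle
x = {i: pulp.LpVariable(f'x_{i}', cat=pulp.LpBinary) for i in range(n)}
y = {(i, j): pulp.LpVariable(f'y_{i}_{j}', cat=pulp.LpBinary) for i, j in edges}
problem = pulp.LpProblem('equipartition', pulp.LpMinimize)
problem += pulp.lpSum(y.values())                 # minimize edges across the cut
problem += pulp.lpSum(x.values()) == n / 2        # equal-size sides
for i, j in edges:
    problem += x[i] - x[j] <= y[i, j]             # together: y >= |x_i - x_j|
    problem += x[j] - x[i] <= y[i, j]
problem.solve(pulp.PULP_CBC_CMD(msg=False))
print(pulp.value(problem.objective))              # 2.0: a balanced cut of C4 crosses 2 edges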
Code example #25
File: survey.py Project: SimoWhatsup/pysurvey
    def __init__(self, survey='MySurvey', species='HI', mosaic='skymap', read_config=False):

        self.logger = get_logger(survey + '_' + mosaic + '_' + species + '_Analysis')
        self.configfilename = survey + '_' + mosaic

        self.helper = HelperConfig(name=survey, species=species, mosaic=mosaic)

        survey_config = self.helper.survey_config
        mosaic_config = self.helper.mosaic_config
        constants_config = self.helper.constants_config
        spectral_config = self.helper.spectral_config
        spatial_config = self.helper.spatial_config

        if read_config:
            try:
                config_read_dict = self.helper.read_config(self.configfilename)

                survey_config_read = config_read_dict.get('survey')
                mosaic_config_read = config_read_dict.get('mosaic')
                constants_config_read = config_read_dict.get('constants')
                spectral_config_read = config_read_dict.get('spectral')
                spatial_config_read = config_read_dict.get('spatial')

            except FileNotFound:
                self.logger.error('One or more needed files do not exist')
                return

            survey_config = self.helper.check_config(survey_config, survey_config_read)
            mosaic_config = self.helper.check_config(mosaic_config, mosaic_config_read)
            constants_config = self.helper.check_config(constants_config, constants_config_read)
            spectral_config = self.helper.check_config(spectral_config, spectral_config_read)
            spatial_config = self.helper.check_config(spatial_config, spatial_config_read)

        self.survey_conf = survey_config
        self.mosaic_conf = mosaic_config
        self.utils_conf = constants_config
        self.spectral_conf = spectral_config
        self.spatial_conf = spatial_config

        self.flag_existance = False

        self.ret = re.compile('\n')
        self.helper.print_config(self.survey_conf, 'survey')
Code example #26
def main():
    logger.set_logger()
    log = logger.get_logger(__name__)
    instance = read_instance('PSP_100_1.psp')
    stock, production, last_production, setup = make_variables(instance)
    problem = make_problem(instance, stock, production, last_production, setup)
    solve_with_log.exec(problem)

    for t in range(instance.n_terms):
        for p in range(instance.n_products):
            if pulp.value(production[p, t]) == 1:
                log.info(f"product:{t}:{p}")
            if pulp.value(last_production[p, t]) == 1:
                log.info(f"last_production:{t}:{p}")
            if pulp.value(stock[p, t]) >= 1:
                log.info(f"stock:{t}:{p}:{pulp.value(stock[p, t])}")

        for p, q in itertools.product(range(instance.n_products),
                                      range(instance.n_products)):
            if pulp.value(setup[p, q, t]) == 1:
                log.info(f"setup:{t}:{p}->{q}")
Code example #27
File: graph_coloring.py Project: ytkn/gurobi_book
def make_problem_for_feasibility(n: int, edges: List[Edge],
                                 n_colors: int) -> pulp.LpProblem:
    log = logger.get_logger(__name__)
    problem = pulp.LpProblem(name="graph_coloring", sense=pulp.LpMinimize)
    x = {(i, j): pulp.LpVariable(name=f"x_{i}_{j}", cat=pulp.LpBinary)
         for i, j in itertools.product(range(n), range(n_colors))}
    z = {(i, j): pulp.LpVariable(name=f"z_{i}_{j}", cat=pulp.LpBinary)
         for i, j in edges}

    problem.objective += pulp.lpSum([z[i, j] for i, j in edges])

    problem.addConstraint(pulp.lpSum([z[i, j] for i, j in edges]) <= 0)

    for i in range(n):
        problem.addConstraint(
            pulp.lpSum([x[i, j] for j in range(n_colors)]) >= 1)
        problem.addConstraint(
            pulp.lpSum([x[i, j] for j in range(n_colors)]) <= 1)

    for (i, j), k in itertools.product(edges, range(n_colors)):
        problem.addConstraint(x[i, k] + x[j, k] <= 1 + z[i, j])
    return problem
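Since the z variables are forced to 0, the model is a pure feasibility check; a simplified standalone sketch without z (the triangle instance is an assumption):

import itertools
import pulp

def feasible(n, edges, n_colors):
    prob = pulp.LpProblem('coloring', pulp.LpMinimize)
    x = {(i, k): pulp.LpVariable(f'x_{i}_{k}', cat=pulp.LpBinary)
         for i, k in itertools.product(range(n), range(n_colors))}
    for i in range(n):
        prob += pulp.lpSum(x[i, k] for k in range(n_colors)) == 1   # one color per vertex
    for (i, j), k in itertools.product(edges, range(n_colors)):
        prob += x[i, k] + x[j, k] <= 1                              # endpoints differ
    prob.solve(pulp.PULP_CBC_CMD(msg=False))
    return pulp.LpStatus[prob.status] != 'Infeasible'

triangle = [(0, 1), (1, 2), (0, 2)]
print(feasible(3, triangle, 2), feasible(3, triangle, 3))  # False True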
Code example #28
def make_problem_by_tight_constraint(G: networkx.DiGraph,
                                     n: int) -> pulp.LpProblem:
    log = logger.get_logger(__name__)
    problem = pulp.LpProblem(name="tsp", sense=pulp.LpMinimize)

    x = {(i, j): pulp.LpVariable(name='x_{}_{}'.format(i, j),
                                 cat=pulp.LpBinary)
         for (i, j) in G.edges}

    u = {
        i: pulp.LpVariable(name='u_{}'.format(i), cat=pulp.LpInteger)
        for i in range(n)
    }

    problem.objective += pulp.lpSum(
        [x[i, j] * G[i][j]['weight'] for (i, j) in G.edges])

    problem.addConstraint(u[0] >= 0)
    problem.addConstraint(u[0] <= 0)

    for i in range(1, n):
        problem.addConstraint(u[i] - (n - 3) * x[i, 0] + x[0, i] >= 2)
        problem.addConstraint(u[i] - x[i, 0] + (n - 3) * x[0, i] <= n - 2)

    # lifted Miller-Tucker-Zemlin subtour-elimination constraints
    for i, j in itertools.product(range(n), range(1, n)):
        if i != j:
            problem.addConstraint(u[i] + 1 - (n - 1) * (1 - x[i, j]) +
                                  (n - 3) * x[j, i] <= u[j])

    for i in range(n):
        except_i = list(range(n))
        except_i.remove(i)
        problem.addConstraint(pulp.lpSum([x[i, j] for j in except_i]) >= 1)
        problem.addConstraint(pulp.lpSum([x[i, j] for j in except_i]) <= 1)
        problem.addConstraint(pulp.lpSum([x[j, i] for j in except_i]) >= 1)
        problem.addConstraint(pulp.lpSum([x[j, i] for j in except_i]) <= 1)

    return problem
Code example #29
def make_problem(func, a: List[int]):
    log = logger.get_logger(__name__)
    n = len(a)
    b = [func(s) for s in a]
    z = {
        i: pulp.LpVariable(name=f"z_{i}", cat=pulp.LpContinuous)
        for i in range(n)
    }
    x = pulp.LpVariable(name="x", cat=pulp.LpContinuous)
    problem = pulp.LpProblem(name="convex_combination", sense=pulp.LpMinimize)
    problem.addConstraint(pulp.lpSum([z[i] * a[i] for i in range(n)]) >= x)
    problem.addConstraint(pulp.lpSum([z[i] * a[i] for i in range(n)]) <= x)
    problem.addConstraint(pulp.lpSum(z) <= 1)
    problem.addConstraint(pulp.lpSum(z) >= 1)
    for i in range(n):
        problem.addConstraint(z[i] >= 0)

    problem.objective += pulp.lpSum([z[i] * b[i] for i in range(n)])
    problem.sos2 = [z[i] for i in range(n)]
    solve_with_log.exec(problem)
    for i in range(n):
        log.info(pulp.value(z[i]))
    return problem
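What the z weights encode, computed by hand: under the SOS2 condition (at most two adjacent nonzero z_i), x = sum(z_i * a_i) and the objective sum(z_i * f(a_i)) linearly interpolates f between breakpoints (values below are illustrative):

a = [0, 1, 2, 3]                       # breakpoints
def f(s):
    return s * s
z = [0.0, 0.5, 0.5, 0.0]               # two adjacent nonzero weights (SOS2)
x = sum(zi * ai for zi, ai in zip(z, a))
approx = sum(zi * f(ai) for zi, ai in zip(z, a))
print(x, approx, f(x))                 # 1.5 2.5 2.25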
Code example #30
    def __init__(self, target_addr, forgery_addr=None, save_path='./payloads',
                 payload_filename=None, protocols=['http', 'https', 'dict'],
                 ports=['22', '80', '443']):
        self.target_addr = target_addr
        self.forgery_addr = forgery_addr
        self.save_path = save_path
        self.payload_filename = payload_filename
        self.logger = get_logger()

        self.protocols = protocols
        self.ports = ports
        resources_dir = str(pathlib.Path(__file__).parent.absolute()) + '/resources'
        self.ip_formats_path = resources_dir + '/ip_formats.txt'
        self.localhost_formats_path = resources_dir + '/localhost_formats.txt'
        self.format_strings_path = resources_dir + '/format_strings.txt'
        self.cloud_payloads_path = resources_dir + '/cloud_payloads.txt'
        self.localhost_payloads_path = resources_dir + '/localhost_payloads.txt'

        if self.forgery_addr is None:
            self.forgery_ip = requests.get('https://checkip.amazonaws.com').text.strip()
            try:
                self.forgery_addr = self.get_addr_from_ip(self.forgery_ip)
            except socket.gaierror:
                self.logger.warning('DNS problem or there is no domain name connected with own public IP')
                self.forgery_addr = None
        else:
            try:
                self.forgery_ip = self.get_ip_from_addr(self.forgery_addr)
            except socket.gaierror:
                self.logger.warning('Wrong forgery address format or DNS problem')
                self.forgery_ip = None

        try:
            self.target_ip = self.get_ip_from_addr(self.target_addr)
        except socket.gaierror:
            self.logger.warning('Cannot obtain IP for target address')
            self.target_ip = None

        if not os.path.isdir(self.save_path):
            os.mkdir(self.save_path)
Code example #31
import logging

from common.model import Player

from common.logger import get_logger
from ..session import Session
from ..telegram_interaction import MessageIds, TelegramInMessage
from ..texts import Texts

l = get_logger('Bot')
l.setLevel(logging.DEBUG)

l = get_logger('BotSession')
l.setLevel(logging.DEBUG)

logger = get_logger('TelegramInteraction')
logger.setLevel(logging.DEBUG)

defaultRating = {'trueskill': [25, 2]}


class EmptyBackend():
    def add_game(self, game, who):
        return None, game

    def add_player(self, player, who):
        return None, player

    def get_player(self, nick=None, phone=None):
        if nick == 'exists':
            return None, Player(nick=nick, phone=phone, rating=defaultRating)
Code example #32
import os
import threading
import requests
import docopt
import json
import time
import yaml
import zmq

from common import logger

instrument_agent_port = 12572
base_api_url = 'instrument/api'

log_dir = os.path.join(instrument_dir, 'output_%s' % time.strftime('%Y%m%d-%H%M%S'))
log = logger.get_logger(file_output=os.path.join(log_dir, 'instrument_control.log'))


def flatten(particle):
    try:
        for each in particle.get('values'):
            id = each.get('value_id')
            val = each.get('value')
            particle[id] = val
        del particle['values']
    except Exception:
        log.error('Exception flattening particle: %s', particle)
    return particle
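# Usage sketch of flatten() (the sample particle below is an assumption
# inferred from the loop above):
#
#     flatten({'pk': 1, 'values': [{'value_id': 'temp', 'value': 10.0}]})
#     # -> {'pk': 1, 'temp': 10.0}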


def get_running(host):
Code example #33
import os
import time

from common import logger
from common import edex_tools

edex_dir = os.getenv('EDEX_HOME')
if edex_dir is None:
    edex_dir = os.path.join(os.getenv('HOME'), 'uframes', 'ooi', 'uframe-1.0', 'edex')
hdf5dir = os.path.join(edex_dir, 'data', 'hdf5', 'sensorreading')
startdir = os.path.join(edex_dir, 'data/utility/edex_static/base/ooi/parsers/mi-dataset/mi')
drivers_dir = os.path.join(startdir, 'dataset/driver')
ingest_dir = os.path.join(edex_dir, 'data', 'ooi')
log_dir = os.path.join(edex_dir, 'logs')

output_dir = os.path.join(dataset_dir, 'output_%s' % time.strftime('%Y%m%d-%H%M%S'))

log = logger.get_logger(file_output=os.path.join(output_dir, 'everything.log'))

DEFAULT_STANDARD_TIMEOUT = 60


class TestCase(object):
    def __init__(self, config):
        self.config = config
        self.instrument = config.get('instrument')
        self.resource = os.path.join(drivers_dir, config.get('resource'))
        self.endpoint = os.path.join(ingest_dir, config.get('endpoint'))
        self.pairs = config.get('pairs', [])
        self.rename = config.get('rename', True)
        # Attempt to obtain a timeout value from the test_case yml.  Default it to
        # DEFAULT_STANDARD_TIMEOUT if no yml value was provided.
        self.timeout = config.get('timeout', DEFAULT_STANDARD_TIMEOUT)
Code example #34
File: sqlite_dao.py Project: xinchunli/new_era
# coding:utf-8

__author__ = 'xinchun.li'
__metaclass__ = type

from sqlalchemy.orm import aliased

from dao.database import db_session, init_db
from common.decorator import error_log
from common import logger


sql_logger = logger.get_logger(logger.SQL)


class MyTransaction:
    def __init__(self, session):
        self.session = session
        self.status = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.session is None:
            return

        if exc_type is None:
            try:
                self.session.commit()
                self.status = True
            except Exception:
                # assumed completion: the original snippet is truncated here
                self.session.rollback()
Code example #35
    # Create an event used to keep this script alive while running the client
    event = threading.Event()
    event.clear()

    # Load the application configuration,
    # initialise logging and create the client
    args = argument_parser.get_arguments()
    config_parser = ConfigParser.SafeConfigParser()
    try:
        config_parser.readfp(open(args.config))
    except IOError as e:
        print('Invalid config file: {0}'.format(args.config))
        print('{0} ({1})'.format(e.strerror, e.errno))
        exit(1)
    the_config = config.Config(config_parser)
    the_logger = logger.get_logger(the_config.get_logger_conf_path())

    # Pick a USB device class
    (vendor_id, product_id) = the_config.get_vendor_and_product_ids()
    device_class = the_config.get_device_class()
    if device_class == 'PyUsbDevice':
        from devices import PyUsbDevice
        interface_number = the_config.get_interface_number()
        device = PyUsbDevice(vendor_id,
                             product_id,
                             interface_number)
    elif device_class == 'TeensyDevice':
        from devices import TeensyDevice
        (usage_page, usage) = the_config.get_usage_and_usage_page()
        device = TeensyDevice(vendor_id, product_id, usage_page, usage)
    elif device_class == 'Blink1Device':
Code example #36
#!/usr/bin/python
# coding=utf-8

import re
import json
from common.XMLtoJSON import XMLtoJSON
from datetime import datetime, date
from tc_common import *

from common.define import session as Session
from common.logger import get_logger
tc_log = get_logger('tc_tkt_type')
from models.tongcheng_model import TcSceneryTicketType 

_xml = """
<request>
    <header>
        <version>%s</version> 
        <accountID>%s</accountID>
        <serviceName>%s</serviceName> 
        )<digitalSign>%s</digitalSign> 
        <reqTime>%s</reqTime>
    </header>
    <body>
        <sceneryId>%d</sceneryId>     
    </body>
</request>
"""

servicename = "GetSceneryTicketTypeList"
Code example #37
import os
import sys

instrument_dir = os.path.dirname(os.path.realpath(__file__))
tools_dir = os.path.dirname(instrument_dir)

sys.path.append(tools_dir)

import time
import yaml
import pprint
import docopt
import instrument_control

from common import edex_tools
from common import logger

log_dir = os.path.join(instrument_dir, 'output_%s' % time.strftime('%Y%m%d-%H%M%S'))
log = logger.get_logger(file_output=os.path.join(log_dir, 'validate_instrument.log'))

MAX_ATTEMPTS = 5
RECORDS_PER_REQUEST = 1000


class TestCase(object):
    def __init__(self, config):
        self.config = config
        self.instrument = config.get('instrument')
        self.module = config.get('module')
        self.klass = config.get('klass')
        self.command_port = config.get('command_port')
        self.event_port = config.get('event_port')
        self.port_agent_config = config.get('port_agent_config')
        self.startup_config = config.get('startup_config')
Code example #38
File: helper_config.py Project: SimoWhatsup/pysurvey
# coding=utf-8
import os
from configparser import RawConfigParser
from common.logger import get_logger

SURVEY_CONFIG_DIR = 'survey_config/'
logger = get_logger()

class FileNotFound(BaseException):
    pass


class CommandNotFound(BaseException):
    pass


def check_for_files(file_list, existence=False):
    """
    Checks for the existence of needed files in the list.
    """
    for filename in file_list:
        if not os.path.exists(filename) and not existence:
            logger.error(filename + " doesn't exist.")
            raise FileNotFound
        elif os.path.exists(filename) and existence:
            logger.error(filename + " already exists.")
            raise FileNotFound


def check_for_command(command_list):
    """
Code example #39
File: __init__.py Project: vpino/kds
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os

from common.logger import get_logger

from ansible import utils
from ansible import callbacks
from ansible.playbook import PlayBook

log = get_logger()

stats = callbacks.AggregateStats()
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)


def deploy_service(username, passwd, hosts, extras):

    ruta = os.path.join(ROLESDIR, 'ansible-role-mailserver/site.yml')

    pb = PlayBook(playbook=ruta, sudo=True, sudo_pass=passwd, host_list=hosts,
                  remote_user=username, extra_vars=extras, callbacks=playbook_cb,
                  runner_callbacks=runner_cb, stats=stats)

    pb.run()
Code example #40
File: get_scenery_list.py Project: ddmkchan/Utils
#!/usr/bin/python
# coding=utf-8

from datetime import datetime
from lxml import etree
from tongcheng.tc_common import *
from time import *

from common.define import session as Session
from common.logger import get_logger
tc_log = get_logger('tc_log')
from models.tongcheng_model import TCScenery

clientIp = "211.136.149.94"
servicename = "GetSceneryList"
pagesize = 100

_xml = """
<request>
    <header>
        <version>%s</version> 
        <accountID>%s</accountID>
        <serviceName>%s</serviceName> 
        <digitalSign>%s</digitalSign> 
        <reqTime>%s</reqTime>
    </header>
    <body>
        <clientIp>%s</clientIp> 
        <cityId>%d</cityId>     
        <page>%d</page>
        <pageSize>%d</pageSize>
Code example #41
# coding:utf-8

__author__ = 'xinchun.li'

from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker

from common import config
from common import logger
from config import constant


diagnose_logger = logger.get_logger(logger.DIAGNOSE)

db_location = config.get(constant.SQLITE3_DB_LOCATION)
diagnose_logger.info('db_location=' + db_location)
engine = create_engine(db_location, convert_unicode=True)
metadata = MetaData()
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=True,
                                         bind=engine))


def init_db():
    metadata.create_all(bind=engine)


def shutdown_session():
    db_session.remove()
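A minimal variant of the same setup against an in-memory SQLite URL (an assumption; convert_unicode and autocommit are dropped, since SQLAlchemy 1.4+ removed them):

from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
db_session = scoped_session(sessionmaker(autoflush=True, bind=engine))
metadata.create_all(bind=engine)  # a no-op until tables are registered on metadata
db_session.remove()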
Code example #42
File: mosaic.py Project: SimoWhatsup/pysurvey
    def __init__(self, survey_conf, mosaic_conf, mtype, species=None, datatype='original', nmsc=0, totmsc=0, path=None):
        """
        Read the fits file: HI, HISA, CO
        To check memory consumption:
            > @profile
            > def function():
            > ...
            > python -m memory_profiler script.py
        :param survey_conf:
        :param mosaic_conf:
        :param mtype: default = brightness_temperature
        :param species:
        :param datatype:
        :param nmsc:
        :param totmsc:
        :param path:
        """
        self.survey = survey_conf['survey']
        self.species = species if species is not None else survey_conf['species']
        self.mosaic = mosaic_conf['mosaic']

        self.mosaic_slug = self.get_mosaic_slug()

        self.logger = get_logger(self.mosaic_slug + '_mosaic')

        self.type = mtype
        self.datatype = datatype
        self.nmsc = nmsc
        self.totmsc = totmsc
        self.path = path

        self.filename, self.mosaic = get_file(self.mosaic_slug, self.datatype, self.nmsc, self.totmsc, self.path)

        # checkForFiles(self.logger, [self.filename])
        self.logger.info('{}'.format(self.filename))

        # Open the file and set the variables
        f = fits.open(self.filename)
        self.keyword = f[0].header

        bscale_flag = False
        if 'bscale' in self.keyword:
            self.bscale = self.keyword['bscale']
            bscale_flag = True
        if 'bzero' in self.keyword and bscale_flag:
            self.bzero = self.keyword['bzero']

        if 'CROTA1' not in self.keyword:
            self.keyword['CROTA1'] = 0.0
        if 'CROTA2' not in self.keyword:
            self.keyword['CROTA2'] = 0.0

        # Build arrays
        try:
            self.x, self.y = self.keyword['CRVAL1'], self.keyword['CRVAL2']
            self.dx, self.dy = self.keyword['CDELT1'], self.keyword['CDELT2']
            self.px, self.py = self.keyword['CRPIX1'], self.keyword['CRPIX2']
            self.nx, self.ny = self.keyword['NAXIS1'], self.keyword['NAXIS2']
            self.xarray = self.x + self.dx * (np.arange(self.nx) + 1. - self.px)
            self.yarray = self.y + self.dy * (np.arange(self.ny) + 1. - self.py)
        except KeyError:
            self.logger.critical("Some keyword missing. Coordinate arrays cannot be built.")

        if 'ADC_AREA' in self.keyword:
            self.object = self.keyword['ADC_AREA']
            del self.keyword['ADC_AREA']
            self.keyword['OBJECT'] = self.object
        if 'FREQ0' in self.keyword:
            self.band = self.keyword['FREQ0']
            del self.keyword['FREQ0']
            self.keyword['BAND'] = self.band
        elif 'RESTFREQ' in self.keyword:
            self.band = self.keyword['RESTFREQ']
            del self.keyword['RESTFREQ']
            self.keyword['BAND'] = self.band
        elif 'ADC_BAND' in self.keyword:
            self.band = self.keyword['ADC_BAND']
            del self.keyword['ADC_BAND']
            self.keyword['BAND'] = self.band

        if self.keyword['NAXIS'] > 2:
            if 'CROTA3' not in self.keyword:
                self.keyword['CROTA3'] = 0.0
            # Build array
            try:
                self.z = self.keyword['CRVAL3']
                self.dz = self.keyword['CDELT3']
                self.pz = self.keyword['CRPIX3']
                self.nz = self.keyword['NAXIS3']
                self.zarray = self.z + self.dz * (np.arange(self.nz) + 1. - self.pz)
            except KeyError:
                self.logger.critical("Some keyword missing. 3rd axis array cannot be built.")

        self.observation = f[0].data
        # free memory
        del f[0]

        self.observation = self.observation.astype(np.float32)

        self.zmin = 1
        self.zmax = self.nz

        if self.type in [config.TB, config.ITB]:
            self.observation[self.observation < -1e4] = 0.
            self.observation[np.isnan(self.observation)] = 0.
            if self.keyword['NAXIS'] > 3:
                if self.survey == 'CGPS':
                    if 'BAND' in self.keyword:
                        if self.keyword['BAND'] == 'HI':
                            self.zmin = 18
                            self.zmax = 271
                            self.observation[:, :self.zmin, :, :] = 0.
                            self.observation[:, self.zmax:, :, :] = 0.
                        if self.keyword['BAND'] == 'CO':
                            self.zmin = 23
                            self.zmax = 256
                            self.observation[:, :self.zmin, :, :] = 0.
                            self.observation[:, self.zmax:, :, :] = 0.
                if self.survey == 'SGPS':
                    self.zmin = 1
                    self.zmax = 410

        self.mosaic = mosaic_conf['mosaic']
        #if not load:
        #    self._inputs = 'Created ' + self.survey + ' Mosaic object ' + self.species + ' ' + self.type
        #else:
        self._inputs = 'Loaded ' + self.survey + ' Mosaic object ' + self.species + ' ' + self.type
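The coordinate-array construction used above, in isolation: FITS reference pixels (CRPIX) are 1-based, hence the +1 (the header values below are illustrative):

import numpy as np

crval, cdelt, crpix, naxis = 0.0, 0.5, 1.0, 5   # reference value/step/pixel, axis length
xarray = crval + cdelt * (np.arange(naxis) + 1.0 - crpix)
print(xarray)  # [0.  0.5 1.  1.5 2. ]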
Code example #43
# -*- coding:utf-8 -*-
"""
@author: mongo
@time: 2018/12/17
@email: [email protected]
@function: a wrapper around Requests; one method dispatches every request type
"""

import requests

from common import logger
from common.config import ReadConfig

logger = logger.get_logger('request')


class Request:
    def __init__(self):
        self.session = requests.sessions.session()  # create one session instance

    def request(self, method, url, data=None):
        method = method.upper()  # normalize the method name to upper case
        config = ReadConfig()
        pre_url = config.get('api', 'pre_url')  # base URL from the config file
        url = pre_url + url  # join base URL and path
        if data is not None and isinstance(data, str):
            data = eval(data)  # convert string payloads to dicts
        logger.info('method: {0}  url: {1}'.format(method, url))
        logger.info('data: {0}'.format(data))
        if method == 'GET':
            resp = self.session.request(method, url=url,
Code example #44
    def __init__(self, mosaic, mosaicConf, utilsConf, rotcurve, scale_data=False):
        """
        Calculate the column density in galacto-centric rings using the rotation curve of M. Pohl et al.
        Boundaries of the galacto-centric annuli follow M. Ackermann et al.
        """
        self.survey = mosaic.survey
        self.mosaic = mosaic.mosaic
        self.species = mosaic.newspec  # specified by the user
        self.type = mosaic.type
        self.datatype = mosaic.datatype
        self.totmsc = mosaic.totmsc
        self.nmsc = mosaic.nmsc

        self.logger = get_logger(self.survey + '_' + self.mosaic + '_' + self.species + '_Deconvolution')
        file, flag, units = '', '', ''
        sur = self.survey.lower()
        HI_all_OR = (self.species == 'HI' or self.species == 'HI_unabsorbed' or self.species == 'HISA')

        path = getPath(self.logger, 'lustre_' + sur + '_' + self.species.lower() + '_column_density')
        if HI_all_OR:
            if self.totmsc == 1:
                flag = self.species + '_column_density_rings'
            else:
                flag = self.species + '_column_density_rings_part_%s-%s' % (self.nmsc, self.totmsc)
            units = '10e+20 H atoms cm-2'
        elif self.species == 'CO':
            flag = 'WCO_intensity_line_rings'
            units = 'K km s-1'

        file = path + self.survey + '_' + self.mosaic + '_' + flag + '.fits'
        checkForFiles(self.logger, [file], existence=True)

        self.logger.info("Open file and get data...")

        # Get HI emission data
        if self.survey == 'LAB':
            Tb = mosaic.observation[:, :, :]
        else:
            Tb = mosaic.observation[0, :, :, :]

        # In case of multiprocessing analysis split the array along the maximum axis
        maxis = 1 + argmax(Tb.shape[-2:])

        lon = mosaic.xarray
        lat = mosaic.yarray

        vel = mosaic.zarray / 1000.
        dv = fabs(mosaic.dz / 1000.)  # [velocity] = km s-1

        # free memory
        del mosaic.observation
        # del mosaic.xarray
        # del mosaic.yarray
        # del mosaic.zarray

        self.Ts = float(utilsConf['tspin'])  # [Excitation (or Spin) Temperature] = K (125-150)

        rmin, rmax, annuli = getAnnuli(glob_annuli)
        if not (rotcurve == 'Bissantz2003' or rotcurve == 'Clemens1985'):
            self.logger.critical(
                "You must enter a correct rotation curve! Options are: 'Bissantz2003' or 'Clemens1985'")
            self.logger.critical("Your entry is %s" % rotcurve)
            sys.exit(0)

        # Array to store results
        cubemap = zeros((annuli, mosaic.ny, mosaic.nx), dtype=float32)

        self.logger.info("Initializing parameters...")
        self.logger.info("1) Ts = %.2f K" % self.Ts)
        self.logger.info("2) dv = %.2f km/s" % dv)
        self.logger.info("3) Tb(min) = %.2f K, Tb(max) = %.2f K" % (amin(Tb), amax(Tb)))
        self.logger.info("4) Rotation curve: '%s'" % rotcurve)
        self.logger.info("5) Annuli: '%s'" % glob_annuli)

        self.logger.info("Calculating gas distribution...")

        # Passing paths to the list
        path_curve = getPath(self.logger, 'rotcurve_mpohl')
        path_conti = ''
        path_unabs = ''
        maxisTc = 0
        if self.species == 'HISA':
            path_conti = getPath(self.logger, self.survey.lower() + '_hi_continuum')
            path_unabs = getPath(self.logger, 'lustre_' + self.survey.lower() + '_hi_unabsorbed')
            # HI continuum
            continuum = path_conti + self.survey + '_' + self.mosaic + '_1420_MHz_I_image.fits'
            Tc, headerc = pyfits.getdata(continuum, 0, header=True)
            Tc[isnan(Tc)] = 0.
            Tc[Tc < 0.] = 0.
            if self.survey == 'CGPS' or self.survey == 'VGPS':
                Tc = Tc[0, 0, :, :]
                maxisTc = maxis - 1
            if self.survey == 'SGPS':
                Tc = Tc[:, :]
                maxisTc = maxis - 1
            # HI unabsorbed
            unabsorbed = path_unabs + self.survey + '_' + self.mosaic + '_HI_unabsorbed_line.fits'
            Tu, headeru = pyfits.getdata(unabsorbed, 0, header=True)
            Tu = Tu[0, :, :, :]

        list = []
        if maxis == 1:
            # list = [self.species,lon,vel,dv,path2,utilsConf,rmin,rmax,rotcurve,maxis]
            list = [path_curve, self.survey, self.mosaic, self.species, lon, vel, mosaic.dy, dv, utilsConf, rmin, rmax,
                    rotcurve, maxis]
            coord = lat
        elif maxis == 2:
            # list = [self.species,lat,vel,dv,path2,utilsConf,rmin,rmax,rotcurve,maxis]
            list = [path_curve, self.survey, self.mosaic, self.species, lat, vel, mosaic.dy, dv, utilsConf, rmin, rmax,
                    rotcurve, maxis]
            coord = lon
        else:
            self.logger.critical("ERROR in splitting Tb!")
            sys.exit(0)

        # Using Multiprocessing if enough cpus are available
        import multiprocessing

        ncpu = glob_ncpu
        # Maximum number of cpus
        if ncpu > 16: ncpu = 16
        # Minimum number of cpus
        if Tb.shape[maxis] < ncpu:
            ncpu = Tb.shape[maxis]

        self.logger.info("Running on %i cpu(s)" % (ncpu))
        if ncpu > 1:
            import itertools
            # arrays = array_split(Tb, ncpu, axis=maxis)
            aTb = array_split(Tb, ncpu, axis=maxis)
            if self.species == 'HISA':
                aTc = array_split(Tc, ncpu, axis=maxisTc)
                aTu = array_split(Tu, ncpu, axis=maxis)
            else:
                aTc = 0. * arange(ncpu)
                aTu = 0. * arange(ncpu)
            # print Tb.shape[maxis],Tu.shape[maxis],Tc.shape[maxisTc]
            coords = array_split(coord, ncpu, axis=0)
            pool = multiprocessing.Pool(processes=ncpu)
            # results = pool.map(Deconvolution, itertools.izip(arrays,coords,itertools.repeat(list)))
            results = pool.map(Deconvolution, itertools.izip(aTb, aTc, aTu, coords, itertools.repeat(list)))
            pool.close()
            pool.join()
            cubemap = concatenate(results, axis=maxis)
            # del arrays
            del coords
            del list
            del results
        else:
            if self.species == 'HISA':
                aTc = Tc
                aTu = Tu
            else:
                aTc = 0.
                aTu = 0.
            cubemap = Deconvolution((Tb, aTc, aTu, coord, list))

        if HI_all_OR: cubemap = cubemap * 1e-20

        # Store results
        newheader = pyfits.Header()
        newheader['ctype1'] = ("GLON-CAR", "Coordinate type")
        newheader['crval1'] = (mosaic.keyword["crval1"], "Galactic longitude of reference pixel")
        newheader['crpix1'] = (mosaic.keyword["crpix1"], "Reference pixel of lon")
        newheader['cdelt1'] = (mosaic.keyword["cdelt1"], "Longitude increment")
        newheader['crota1'] = (mosaic.keyword["crota1"], "Longitude rotation")
        newheader['cunit1'] = ("deg", "Unit type")

        newheader['ctype2'] = ("GLAT-CAR", "Coordinate type")
        newheader['crval2'] = (mosaic.keyword["crval2"], "Galactic latitude of reference pixel")
        newheader['crpix2'] = (mosaic.keyword["crpix2"], "Reference pixel of lat")
        newheader['cdelt2'] = (mosaic.keyword["cdelt2"], "Latitude increment")
        newheader['crota2'] = (mosaic.keyword["crota2"], "Latitude rotation")
        newheader['cunit2'] = ("deg", "Unit type")

        newheader['ctype3'] = ("Rband", "Coordinate type")
        newheader['crval3'] = (0, "Ring of reference pixel")
        newheader['crpix3'] = (1.0, "Reference pixel of ring")
        newheader['cdelt3'] = (1, "Ring increment")
        newheader['crota3'] = (mosaic.keyword["crota3"], "Ring rotation")

        newheader['bunit'] = (units, "Map units")
        newheader['datamin'] = (amin(cubemap), "Min value")
        newheader['datamax'] = (amax(cubemap), "Max value")

        newheader['minfil'] = unravel_index(argmin(cubemap), cubemap.shape)[0]
        newheader['mincol'] = unravel_index(argmin(cubemap), cubemap.shape)[1]
        newheader['minrow'] = unravel_index(argmin(cubemap), cubemap.shape)[2]
        newheader['maxfil'] = unravel_index(argmax(cubemap), cubemap.shape)[0]
        newheader['maxcol'] = unravel_index(argmax(cubemap), cubemap.shape)[1]
        newheader['maxrow'] = unravel_index(argmax(cubemap), cubemap.shape)[2]
        if self.totmsc == 1:
            newheader['object'] = ("Mosaic " + self.mosaic, self.survey + " Mosaic")
        else:
            newheader['object'] = ("Mosaic %s (%s/%s)" % (self.mosaic, self.nmsc, self.totmsc),
                                   "%s Mosaic (n/tot)" % self.survey)
        newheader.add_history('Rotation curve: %s' % rotcurve)
        newheader.add_history('Annuli: %s' % glob_annuli)
        if not self.species == 'CO':
            newheader.add_history('Spin temperature: %s K' % self.Ts)

        # Output file
        results = pyfits.PrimaryHDU(cubemap, newheader)
        if scale_data:
            self.logger.info("Writing scaled data to a fits file in...")
            results.scale('int16', '', bscale=mosaic.bscale, bzero=mosaic.bzero)
        else:
            self.logger.info("Writing data to a fits file in...")

        # Create a Table with the annuli boundaries
        col1 = pyfits.Column(name='Rmin', format='1E', unit='kpc', array=array(rmin))
        col2 = pyfits.Column(name='Rmax', format='1E', unit='kpc', array=array(rmax))
        cols = pyfits.ColDefs([col1, col2])
        tbl = pyfits.new_table(cols)
        tbl.name = "BINS"

        thdulist = pyfits.HDUList([results, tbl])

        thdulist.writeto(file, output_verify='fix')
        self.logger.info("%s" % path)
        self.logger.info("Done")