Example 1
    def send_node_alert(self, node, msg=''):
        """
        发送节点错误信息
        :param node:
        :param msg:
        :return:
        """
        logger.error("{} {}".format(node.id, msg))
        self_node = models.Node.objects.filter(agent=node.agent,
                                               role='self').first()
        if not self_node or not self_node.notify_when_lose_children:
            # "当下级失联时告警"设置为否时不告警
            return

        title = "中心链接错误"
        node_type = '分中心' if node.type == 'sub_center' else '子中心'
        body = msg if msg else "下级{node_type}-{node_name}({node_ip})无法连接,数据和配置将无法同步,请及时处理".format(
            **{
                "node_type": node_type,
                "node_name": node.name,
                "node_ip": node.ip
            })
        userinfos = node.agent.userinfo_set.filter(is_admin=1)
        user_emails = [ui.user.email for ui in userinfos]
        for email in user_emails:
            send_msg(msg_type=4,
                     to=email,
                     content=body,
                     title=title,
                     agent=node.agent)
Example 2
def exception_error(step, desc, conf):
    """Handle an exception.

    Arguments:
        step: error step
        desc: description of the section error
        conf: configuration object
    """
    exp_info = sys.exc_info()
    exception_msg = str(exp_info[0]) + ' (' + str(exp_info[1]) + ')'
    traceback_msg = traceback.format_exc()

    short_message = desc + ', catch exception ' + exception_msg
    message = desc + ', catch exception ' + exception_msg + \
              '\n Stacktrace: \n' + traceback_msg

    if step == 'sync':
        sync_run.error(short_message, message, conf)
    elif step == 'demux':
        demux_run.error(short_message, message, conf)
    elif step == 'qc':
        qc_run.error(short_message, message, conf)
    else:
        # Log the exception
        common.log('SEVERE', 'Exception: ' + exception_msg, conf)
        common.log('WARNING', traceback_msg.replace('\n', ' '), conf)

        # Send a mail with the exception
        common.send_msg("[Aozan] Exception: " + exception_msg, traceback_msg,
                        True, conf)
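A hypothetical call site for the dispatcher above (do_sync is a placeholder, not a function from the original project; 'sync', 'demux' and 'qc' are the step names the function recognizes):

# exception_error() reads sys.exc_info(), so it must run inside an except block
try:
    do_sync(conf)
except Exception:
    exception_error('sync', 'Error during synchronization of the run', conf)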
Example 3
def send_command(name: CommandEnum, data: str = None) -> Optional[dict]:
    data = get_command(name, data)

    data = bytes(data, 'utf-8')

    # The public key is sent unencrypted
    if name != CommandEnum.SEND_PUBLIC_KEY:
        print(f'[*] Sending raw ({len(data)}): {data}')
        data = DATA['info_security'].encrypt(data)

    print(f'[+] Sending ({len(data)}): {data}')

    send_msg(sock, data)

    print('[+] Receiving...')

    response_data = recv_msg(sock)
    if not response_data:
        return

    print(f'[+] Response ({len(response_data)}): {response_data}')

    # The AES key, already encrypted with the public key, is sent without additional encryption
    if name != CommandEnum.SEND_PUBLIC_KEY:
        response_data = DATA['info_security'].decrypt(response_data)
        print(f'[*] Response raw ({len(response_data)}): {response_data}')

    rs = json.loads(response_data)

    command = CommandEnum[rs['command']]
    if command != name:
        raise Exception(f'Received response for another command: {command}, expected {name}')

    return rs
Example 4
def send_command(command: str) -> str:
    with socket.socket() as sock:
        sock.connect((HOST, PORT))

        data = bytes(command, 'utf-8')
        send_msg(sock, data)

        response_data = recv_msg(sock)
        return str(response_data, 'utf-8')
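All of these snippets rely on send_msg/recv_msg helpers from a shared common module whose implementation is not shown. A minimal sketch, assuming the usual 4-byte big-endian length-prefix framing (an assumption, not the verified implementation):

import socket
import struct
from typing import Optional

def send_msg(sock: socket.socket, data: bytes) -> None:
    # Prefix each message with its length packed as a 4-byte big-endian integer
    sock.sendall(struct.pack('>I', len(data)) + data)

def recv_msg(sock: socket.socket) -> Optional[bytes]:
    # Read the 4-byte length prefix, then exactly that many payload bytes
    header = _recvall(sock, 4)
    if not header:
        return None
    (length,) = struct.unpack('>I', header)
    return _recvall(sock, length)

def _recvall(sock: socket.socket, n: int) -> Optional[bytes]:
    # Read exactly n bytes, or return None if the peer closed the connection
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            return None
        buf += chunk
    return buf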
Example 5
    def handle(self):
        print('Connected:', self.client_address)

        data = recv_msg(self.request)
        print('Receiving ({}): {}'.format(len(data), data))

        print('Sending')
        send_msg(self.request, data.upper())

        print('Close\n')
Example 6
    def listen_client(self, client, address):

        while True:
            request = self._get_request(client)

            if request:
                response = self._generate_responce(request)
                send_msg(client, response)

            else:
                self.close_client(client)
                return False
Example 7
    def listen_client(self, client, address):

        while True:
            request = self._get_request(client)
            print("request: {}".format(request))

            if request:
                response = self._generate_responce(request)
                send_msg(client, response)
            else:
                client.close()
                return False
Example 8
    def listenServer(self):
        while True:
            try:
                time.sleep(3)
                send_msg(self.sock, "ping *")
                recv_msg(self.sock)
            except:
                if self.sock._closed:
                    return None
                self.changeStatus(connected=False)
                self.connectionRefused.emit()
                return None
Example 9
def demux_run_standalone(run_id, input_run_data_path, fastq_output_dir, samplesheet_csv_path, nb_mismatch, conf):
    """ Demultiplexing the run with bcl2fastq on version parameter.

    Arguments:
        run_id: The run id
        input_run_data_path: input run data path to demultiplexing
        fastq_output_dir: fastq directory to save result on demultiplexing
        samplesheet_csv_path: samplesheet path in csv format, version used by bcl2fastq
        nb_mismatch: number of allowed mismatches in index matching
        conf: configuration dictionary
    """

    bcl2fastq_executable_path = conf[BCL2FASTQ_PATH_KEY]
    tmp = conf[TMP_PATH_KEY]

    run_id_msg = " for run " + run_id + ' on ' + common.get_instrument_name(run_id, conf)
    bcl2fastq_log_file = tmp + "/bcl2fastq_output_" + run_id + ".err"

    # Check if the bcl2fastq path is OK
    if os.path.isdir(bcl2fastq_executable_path):
        bcl2fastq_executable_path += '/bcl2fastq'
    elif not os.path.isfile(bcl2fastq_executable_path):
        error("Error while setting executable command file bcl2fastq" + run_id_msg + ", invalid bcl2fastq path: " +
              bcl2fastq_executable_path, "Error while setting executable command file bcl2fastq" + run_id_msg +
              ", invalid bcl2fastq path: " + bcl2fastq_executable_path, conf)
        return False

    cmd = create_bcl2fastq_command_line(run_id, bcl2fastq_executable_path, input_run_data_path, fastq_output_dir,
                                        samplesheet_csv_path, tmp, nb_mismatch, conf)

    common.log('INFO', 'Demultiplexing in standalone mode using the following command line: ' + str(cmd), conf)

    exit_code = os.system(cmd)
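    # Note: on POSIX systems os.system() returns the process wait status rather
    # than the raw exit code; non-zero still means failure, which is all the
    # check below relies on.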

    if exit_code != 0:
        error("Error while executing bcl2fastq " + run_id_msg,
              'Error while executing bcl2fastq (exit code: ' + str(
                  exit_code) + ').\nCommand line:\n' + cmd, conf)

        msg = 'Error while executing bcl2fastq ' + run_id_msg + ' (exit code: ' + str(
                  exit_code) + ').\nCommand line:\n' + cmd

        # Check if the log file has been generated
        if not os.path.exists(bcl2fastq_log_file):
            error("Error with bcl2fastq log for run " + run_id + ".", "No bcl2fastq log available", conf)
            common.send_msg('[Aozan] Failed demultiplexing ' + run_id_msg, msg, True, conf)
        else:
            msg += "\n\nPlease check the attached bcl2fastq output error file."
            common.send_msg_with_attachment('[Aozan] Failed demultiplexing ' + run_id_msg, msg, bcl2fastq_log_file, True, conf)

        return False

    return True
Example 10
    def _send_message(self):
        logging.info('message sender started.')
        while not self._stopped:
            try:
                remote_addr, msg = self._sendq.get()
                sock = self._agentsocks.get(remote_addr, None)
                if sock is None:
                    logging.warn('agent socket not found by %s', remote_addr)
                else:
                    logging.info('pub msg to agent %s', remote_addr)
                    send_msg(sock, msg)
            except Exception as e:
                logging.exception('error while send message to %s',
                                  remote_addr)
                self.stop_agent(remote_addr)
        logging.info('message sender stopped')
Example 11
def process_connect(conn, addr):
    print(f"[+] New connection from {addr}")

    try:
        while True:
            data = recv_msg(conn)
            if not data:
                break

            print(f'[+] Receiving ({len(data)}): {data}')

            # If this is not the first request, the AES session key already
            # exists and the request has to be decrypted
            is_existing_connect = conn in CONNECTION_BY_KEY
            if is_existing_connect:
                data = CONNECTION_BY_KEY[conn].decrypt(data)
                print(f'[*] Receiving raw ({len(data)}): {data}')

                rs = process_command(data, conn, addr)

                print(f'[*] Sending raw ({len(rs)}): {rs}')
                rs = CONNECTION_BY_KEY[conn].encrypt(rs)

            else:
                key_AES = rsa.decrypt(data, PRIVATE_KEY)
                print('key_AES:', key_AES)

                CONNECTION_BY_KEY[conn] = InfoSecurity(key_AES)
                rs = b''

            print(f'[+] Sending ({len(rs)}): {rs}')
            send_msg(conn, rs)

            print()

    except:
        import traceback
        print(traceback.format_exc())

    finally:
        conn.close()

        if conn in CONNECTION_BY_KEY:
            CONNECTION_BY_KEY.pop(conn)

        print(f"[+] Closed connection from {addr}")
        print()
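The matching client has to perform the key exchange this server expects: the first message carries the AES session key encrypted with the server's RSA public key, and all later traffic is AES-encrypted. A minimal sketch of that first step, assuming the same rsa package and InfoSecurity wrapper used above (handshake itself is hypothetical):

import rsa  # assumed: the same rsa package the server uses

def handshake(sock, server_public_key, key_aes):
    # First message: the AES session key, RSA-encrypted (the server's else branch)
    send_msg(sock, rsa.encrypt(key_aes, server_public_key))
    recv_msg(sock)  # the server acknowledges with an empty message (b'')
    # All subsequent requests/responses are encrypted with the shared AES key
    return InfoSecurity(key_aes)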
Example 12
def send_mail_if_critical_free_space_available(conf):
    """Check if disk free space is critical. If true send a mail.

    Arguments:
        conf: configuration dictionary
    """

    for path in get_hiseq_data_paths(conf):

        if os.path.exists(path):
            df = common.df(path)
            free_space_threshold = long(conf[HISEQ_CRITICAL_MIN_SPACE_KEY])
            if df < free_space_threshold:
                common.send_msg('[Aozan] Critical: Not enough disk space on sequencer storage for current run',
                                'There is only %.2f' % (df / (1024 * 1024 * 1024)) +
                                ' Gb left for run storage in ' + path + '. ' +
                                ' The current warning threshold is set to %.2f' % (
                                    free_space_threshold / (1024 * 1024 * 1024)) + ' Gb.', False, conf)
Example 13
def send_failed_run_message(run_id, secs, conf):
    """Send a mail to inform about a failed run.

    Arguments:
        run_id: the run id
        secs: maximum delay since the end of the run
        conf: configuration dictionary
    """

    run_path = hiseq_run.find_hiseq_run_path(run_id, conf)
    file_to_test = run_path + '/' + run_id + '/RTAComplete.txt'
    last = os.stat(file_to_test).st_mtime

    df = common.df(run_path) / (1024 * 1024 * 1024)
    du = common.du(run_path + '/' + run_id) / (1024 * 1024 * 1024)

    common.send_msg('[Aozan] Failed run ' + run_id + ' on ' + common.get_instrument_name(run_id, conf),
                    'A run (' + run_id + ') has failed on ' + common.get_instrument_name(run_id, conf) +
                    ' at ' + common.time_to_human_readable(last) + '.\n' + 'Data for this run can be found at: ' +
                    run_path + '\n\nFor this task %.2f GB has been used and %.2f GB still free.' % (du, df),
                    False, conf)
Example 14
    def _do_write(self, sock):
        while not self._queue.empty():
            try:
                msg = self._queue.get_nowait()
            except Q.Empty:
                logging.warn('Try to get msg from empty queue..')
                return
            msg.set_header(msg.H_SEND_AT, datetime.utcnow())
            size, times = send_msg(sock, msg)
            logging.info('msg %s sent to %s use %d times', msg,
                         self._master_addr, times)
            logging.debug('msg data = %s', msg.body)
Example 15
def send_command(data: bytes = None, key: InfoSecurity = None) -> Optional[dict]:
    print(f'[+] Sending raw ({len(data)}): {data}')

    if key:
        data = key.encrypt(data)
        print(f'[+] Sending ({len(data)}): {data}')

    send_msg(sock, data)

    print('[+] Receiving...')

    response_data = recv_msg(sock)
    if not response_data:
        return

    print(f'[+] Response ({len(response_data)}): {response_data}')

    if key:
        response_data = key.decrypt(response_data)
        print(f'[*] Response raw ({len(response_data)}): {response_data}')

    rs = json.loads(response_data)
    return rs
Example 16
def send_mail_if_recent_run(run_id, secs, conf):
    """Send an email to inform that a new run is finished.

    Arguments:
        run_id: run id
        secs: maximum delay since the end of the run
        conf: configuration object
    """

    run_path = hiseq_run.find_hiseq_run_path(run_id, conf)
    if run_path is False:
        return

    last = hiseq_run.check_end_run_since(run_id, secs, conf)

    if last > 0:
        df = common.df(run_path) / (1024 * 1024 * 1024)
        du = common.du(run_path + '/' + run_id) / (1024 * 1024 * 1024)
        common.send_msg('[Aozan] Ending run ' + run_id + ' on ' + common.get_instrument_name(run_id, conf),
                        'A new run (' + run_id + ') is finished on ' +
                        common.get_instrument_name(run_id, conf) + ' at ' + common.time_to_human_readable(
                            last) + '.\n' +
                        'Data for this run can be found at: ' + run_path +
                        '\n\nFor this task %.2f GB has been used and %.2f GB still free.' % (du, df), False, conf)
Example 17
    def sendRequest(self, request):
        send_msg(self.sock, request)
        response = recv_msg(self.sock)
        return response
Example 18
if __name__ == '__main__':
    parser = opt_parser()
    opt = parser.parse_args()

    config = load_config(opt.net_config)
    #pprint(config)

    # Start server here
    server = Server(host=opt.host, port=opt.port, size=len(config))
    server.start()
    time.sleep(0.1)

    fetch_msg = encode(dict(type="extract", host=opt.host, port=opt.port))
    for node in config.values():
        send_msg(fetch_msg, host=node['host'], port=node['port'])
        #print(node)
    time.sleep(0.1)

    # wait for server here
    server.join()

    id_table = dict()
    while not server.responses.empty():
        resp = server.responses.get()
        print(repr(resp))
        id_table[resp['id']] = resp

    graph = as_nx(id_table)
    nx.draw(graph, with_labels=True)
    plt.show()
Example 19
def demux(run_id, conf):
    """Add a processed run id to the list of the run ids.

    Arguments:
        run_id: The run id
        conf: configuration dictionary
    """

    start_time = time.time()
    common.log('INFO', 'Demux step: Starting', conf)

    reports_data_base_path = conf[REPORTS_DATA_PATH_KEY]
    reports_data_path = common.get_report_run_data_path(run_id, conf)
    run_id_msg = " for run " + run_id + ' on ' + common.get_instrument_name(run_id, conf)

    samplesheet_filename = build_samplesheet_filename(run_id, conf)
    bcl2fastq_samplesheet_path = conf[TMP_PATH_KEY] + '/' + samplesheet_filename + '.csv'

    input_run_data_path = common.get_input_run_data_path(run_id, conf)

    if input_run_data_path is None:
        return False

    fastq_output_dir = conf[FASTQ_DATA_PATH_KEY] + '/' + run_id

    basecall_stats_prefix = 'basecall_stats_'
    basecall_stats_file = basecall_stats_prefix + run_id + '.tar.bz2'

    # Check if root input bcl data directory exists
    if not os.path.exists(input_run_data_path):
        error("Basecalling data directory does not exist",
              "Basecalling data directory does not exist: " + str(input_run_data_path), conf)
        return False

    # Check if root input fastq data directory exists
    if not common.is_dir_exists(FASTQ_DATA_PATH_KEY, conf):
        error("FASTQ data directory does not exist",
              "FASTQ data directory does not exist: " + conf[FASTQ_DATA_PATH_KEY], conf)
        return False

    # Check if bcl2fastq samplesheets path exists
    if not common.is_dir_exists(BCL2FASTQ_SAMPLESHEETS_PATH_KEY, conf):
        error("Bcl2fastq samplesheet directory does not exist",
              "Bcl2fastq samplesheet directory does not exist: " + conf[BCL2FASTQ_SAMPLESHEETS_PATH_KEY], conf)
        return False

    # Check if bcl2fastq basedir path exists
    if not common.is_conf_value_equals_true(BCL2FASTQ_USE_DOCKER_KEY, conf):
        if not common.is_dir_exists(BCL2FASTQ_PATH_KEY, conf):
            error("Bcl2fastq directory does not exist",
                  "Bcl2fastq directory does not exist: " + conf[BCL2FASTQ_PATH_KEY], conf)
            return False

    # Check if temporary directory exists
    if not common.is_dir_exists(TMP_PATH_KEY, conf):
        error("Temporary directory does not exist",
              "Temporary directory does not exist: " + conf[TMP_PATH_KEY], conf)
        return False

    # Check if reports_data_path exists
    if not os.path.exists(reports_data_base_path):
        error("Report directory does not exist",
              "Report directory does not exist: " + reports_data_base_path, conf)
        return False

    # Create if not exist report directory for the run
    if not os.path.exists(reports_data_path):
        os.mkdir(reports_data_path)

    # Check if basecall stats archive exists
    if os.path.exists(reports_data_path + '/' + basecall_stats_file):
        error('Basecall stats archive already exists for run ' + run_id,
              'Basecall stats archive already exists for run ' + run_id + ': ' + basecall_stats_file, conf)
        return False

    # Check if the output directory already exists
    if os.path.exists(fastq_output_dir):
        error("FASTQ output directory already exists for run " + run_id,
              'FASTQ output directory already exists for run ' + run_id + ': ' + fastq_output_dir, conf)
        return False

    # Compute disk usage and disk free to check if enough disk space is available
    input_path_du = common.du(input_run_data_path)
    output_df = common.df(conf[FASTQ_DATA_PATH_KEY])
    du_factor = float(conf[DEMUX_SPACE_FACTOR_KEY])
    space_needed = input_path_du * du_factor

    common.log("WARNING", "Demux step: input disk usage: " + str(input_path_du), conf)
    common.log("WARNING", "Demux step: output disk free: " + str(output_df), conf)
    common.log("WARNING", "Demux step: space needed: " + str(space_needed), conf)

    common.log("CONFIG", "Bcl2fastq Docker mode: " + str(
        common.is_conf_value_equals_true(Settings.BCL2FASTQ_USE_DOCKER_KEY, conf)), conf)

    # Check if free space is available
    if output_df < space_needed:
        error("Not enough disk space to perform demultiplexing for run " + run_id,
              "Not enough disk space to perform demultiplexing for run " + run_id +
              '.\n%.2f Gb' % (space_needed / 1024 / 1024 / 1024) + ' is needed (factor x' + str(
                  du_factor) + ') on ' + fastq_output_dir + '.', conf)
        return False

    # Load RunInfo object
    run_info = RunInfo.parse(input_run_data_path + '/RunInfo.xml')

    # Load samplesheet
    samplesheet, original_samplesheet_path = load_samplesheet(run_id, input_run_data_path, samplesheet_filename, conf)

    if samplesheet is None:
        return False

    # Update samplesheet
    if not update_samplesheet(samplesheet, run_id, run_info.getFlowCellLaneCount(), conf):
        return False

    # Check samplesheet
    check_result, samplesheet_warnings = check_samplesheet(samplesheet, run_id, run_info.getFlowCell(), conf)
    if not check_result:
        return False

    # Get the number of mismatches
    nb_mismatch = get_bcl2fastq_mismatches(samplesheet, conf[BCL2FASTQ_MISMATCHES_KEY])

    # Write final samplesheet
    if not write_bcl2fastq_samplesheet(samplesheet, bcl2fastq_samplesheet_path, conf):
        return False

    # Run demultiplexing
    if common.is_conf_value_equals_true(Settings.BCL2FASTQ_USE_DOCKER_KEY, conf):
        # With image docker
        if not demux_run_with_docker(run_id, input_run_data_path, fastq_output_dir, bcl2fastq_samplesheet_path,
                                     nb_mismatch, conf):
            return False
    else:
        if not demux_run_standalone(run_id, input_run_data_path, fastq_output_dir, bcl2fastq_samplesheet_path,
                                    nb_mismatch, conf):
            return False

    # Check if the output directory has been created
    if not os.path.exists(fastq_output_dir):
        error("Error while demultiplexing run " + run_id + ' on ' + common.get_instrument_name(run_id, conf),
              'Error while demultiplexing run ' + run_id + '.\n' +
              'The output directory of bcl2fastq has not been created: ' + fastq_output_dir, conf)
        return False

    # Check that the output is a directory and not a regular file
    if os.path.isfile(fastq_output_dir):
        error("Error while demultiplexing run " + run_id + ' on ' + common.get_instrument_name(run_id, conf),
              'Error while demultiplexing run ' + run_id + '.\n' +
              'The output directory of bcl2fastq is a file instead of a directory: ' + fastq_output_dir, conf)
        return False

    # Copy bcl2fastq log to output directory
    cmd = 'cp ' + quote(conf[TMP_PATH_KEY]) + '/bcl2fastq_output_' + run_id + '.* ' + quote(fastq_output_dir)
    common.log("INFO", "exec: " + cmd, conf)
    if os.system(cmd) != 0:
        error("Error while copying bcl2fastq log to the output FASTQ directory" + run_id_msg,
              'Error while copying bcl2fastq log to the output FASTQ directory.\nCommand line:\n' + cmd, conf)
        return False

    # The output directory must be read only
    if not common.chmod_files_in_dir(fastq_output_dir, ".fastq", conf):
        error("Error while setting the output FASTQ directory to read only" + run_id_msg,
              'Error while setting the output FASTQ directory to read only: ' + fastq_output_dir, conf)
        return False


    if not check_if_output_fastq_files_exists(fastq_output_dir):
        error("Error with bcl2fastq execution for run " + run_id,
              "Error with bcl2fastq execution for run " + run_id + " no FASTQ file found in " + fastq_output_dir,
              conf)
        return False

    # Copy samplesheet to output directory
    cmd = 'cp -p ' + quote(bcl2fastq_samplesheet_path) + ' ' + quote(fastq_output_dir + '/SampleSheet.csv')
    common.log("INFO", "exec: " + cmd, conf)
    if os.system(cmd) != 0:
        error("Error while copying samplesheet file to FASTQ directory for run " + run_id,
              'Error while copying samplesheet file to FASTQ directory.\nCommand line:\n' + cmd, conf)
        return False

    # Create archives on demultiplexing statistics
    if not archive_demux_stat(run_id, fastq_output_dir, reports_data_path, basecall_stats_file,
                              basecall_stats_prefix, bcl2fastq_samplesheet_path, conf):
        return False

    # Archive samplesheet
    if not archive_samplesheet(run_id, original_samplesheet_path, bcl2fastq_samplesheet_path, conf):
        return False

    # Remove temporary samplesheet files
    if os.path.exists(bcl2fastq_samplesheet_path):
        os.remove(bcl2fastq_samplesheet_path)

    # Create index.html file
    common.create_html_index_file(conf, run_id, [Settings.HISEQ_STEP_KEY, Settings.DEMUX_STEP_KEY])

    df_in_bytes = common.df(fastq_output_dir)
    du_in_bytes = common.du(fastq_output_dir)
    df = df_in_bytes / (1024 * 1024 * 1024)
    du = du_in_bytes / (1024 * 1024 * 1024)

    common.log("WARNING", "Demux step: output disk free after demux: " + str(df_in_bytes), conf)
    common.log("WARNING", "Demux step: space used by demux: " + str(du_in_bytes), conf)

    duration = time.time() - start_time

    msg = 'Ending demultiplexing with ' + str(nb_mismatch) + ' mismatch(es) for run ' + run_id + '.' + \
          '\nJob finished at ' + common.time_to_human_readable(time.time()) + \
          ' without error in ' + common.duration_to_human_readable(duration) + '.\n\n' + \
          'FASTQ files for this run ' + \
          'can be found in the following directory:\n  ' + fastq_output_dir

    if samplesheet_warnings.size() > 0:
        msg += '\n\nSamplesheet warnings:'
        for warn in samplesheet_warnings:
            msg += "\n  - " + warn

    # Add path to report if reports.url exists
    if common.is_conf_key_exists(REPORTS_URL_KEY, conf):
        msg += '\n\nRun reports can be found at following location:\n  ' + conf[REPORTS_URL_KEY] + '/' + run_id

    msg += '\n\nFor this task %.2f GB has been used and %.2f GB still free.' % (du, df)

    common.send_msg('[Aozan] Ending demultiplexing for run ' + run_id + ' on ' +
                    common.get_instrument_name(run_id, conf), msg, False, conf)
    common.log('INFO', 'Demux step: successful in ' + common.duration_to_human_readable(duration), conf)

    return True
Example 20
import json
import socket

from common import send_msg, recv_msg

PORT = 9090

with socket.socket() as sock:
    sock.bind(('', PORT))
    sock.listen(1)

    print('Server: {}'.format(sock.getsockname()))

    while True:
        conn, addr = sock.accept()
        print('Connected:', addr)

        data = recv_msg(conn)
        print('Receiving ({}): {}'.format(len(data), data))

        json_data = json.loads(data)
        print('json_data:', json_data)

        json_data['title'] = 'updates'
        json_data['counter'] += 1

        data = json.dumps(json_data)
        print('Sending: {}'.format(data))

        rs = bytes(data, 'utf-8')
        send_msg(conn, rs)

        print('Close\n')
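The matching client for this JSON echo server is not shown; a minimal hypothetical counterpart only needs to send a JSON object carrying the title and counter fields the server reads:

import json
import socket

from common import send_msg, recv_msg

with socket.socket() as sock:
    sock.connect(('localhost', 9090))

    request = {'title': 'example', 'counter': 0}
    send_msg(sock, bytes(json.dumps(request), 'utf-8'))

    response = recv_msg(sock)
    print(json.loads(response))  # expected: {'title': 'updates', 'counter': 1}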
Example 21
def sync(run_id, conf):
    """Synchronize a run.

    Arguments:
        run_id: the run id
        conf: configuration dictionary
    """

    start_time = time.time()
    common.log('INFO', 'Sync step: Starting', conf)

    bcl_data_path = conf[BCL_DATA_PATH_KEY]
    reports_data_base_path = conf[REPORTS_DATA_PATH_KEY]
    output_path = bcl_data_path + '/' + run_id

    # check if rsync exists in PATH
    if not common.exists_in_path("rsync"):
        error("Can't find all needed commands in PATH env var",
              "Can't find all needed commands in PATH env var. Unable to find: rsync command.", conf)
        return False

    # Check if reports_data_path exists
    if not os.path.exists(reports_data_base_path):
        error("Report directory does not exist", "Report directory does not exist: " + reports_data_base_path, conf)
        return False

    # Check if enough space to store reports
    if common.df(reports_data_base_path) < 10 * 1024 * 1024 * 1024:
        error("Not enough disk space to store aozan reports for run " + run_id,
              "Not enough disk space to store aozan reports for run " + run_id +
              '.\nNeed more than 10 Gb on ' + reports_data_base_path + '.', conf)
        return False

    # Do the synchronization
    if not partial_sync(run_id, True, conf):
        return False

    # Rename partial sync directory to final run BCL directory
    if os.path.exists(output_path + '.tmp'):
        os.rename(output_path + '.tmp', output_path)

    # Check used and free space
    df_in_bytes = common.df(bcl_data_path)
    du_in_bytes = common.du(output_path)
    df = df_in_bytes / (1024 * 1024 * 1024)
    du = du_in_bytes / (1024 * 1024 * 1024)

    common.log("WARNING", "Sync step: output disk free after sync: " + str(df_in_bytes), conf)
    common.log("WARNING", "Sync step: space used by sync: " + str(du_in_bytes), conf)

    duration = time.time() - start_time

    msg = 'Ending synchronization for run ' + run_id + '.\n' + \
          'Job finished at ' + common.time_to_human_readable(time.time()) + \
          ' without error in ' + common.duration_to_human_readable(duration) + '.\n\n' + \
          'Run output files (without .cif files) can be found in the following directory:\n  ' + output_path

    # Add path to report if reports.url exists
    if common.is_conf_key_exists(REPORTS_URL_KEY, conf):
        msg += '\n\nRun reports can be found at following location:\n  ' + conf[REPORTS_URL_KEY] + '/' + run_id

    msg += '\n\nFor this task %.2f GB has been used and %.2f GB is still free.' % (du, df)

    common.send_msg('[Aozan] Ending synchronization for run ' + run_id + ' on ' +
                    common.get_instrument_name(run_id, conf), msg, False, conf)
    common.log('INFO', 'Sync step: successful in ' + common.duration_to_human_readable(duration), conf)
    return True
Example 22
import io
import socket

from PIL import Image

from common import send_msg, recv_msg

PORT = 9090

with socket.socket() as sock:
    sock.bind(('', PORT))
    sock.listen()

    print('Server: {}'.format(sock.getsockname()))

    while True:
        conn, addr = sock.accept()
        print('Connected:', addr)

        data = recv_msg(conn)
        print('Receiving {} bytes'.format(len(data)))

        img = Image.open(io.BytesIO(data))
        print('Receiving image:', img)

        print('Transform image in thumbnail')

        # Transform in thumbnail
        img.thumbnail((75, 75))

        print('Img:', img)

        # Write thumbnail in buffer
        data_io = io.BytesIO()
        img.save(data_io, 'jpeg')

        response_data = data_io.getvalue()

        print('Sending {} bytes'.format(len(response_data)))

        send_msg(conn, response_data)

        print('Close\n')
Example 23
icred = gss.gssCred()
icred.acquire(None, 3600, [gss.GSS_MECH_KRB5], gss.GSS_PY_INITIATE)

target = gss.gssName()
target.import_name('host@linuxbox', gss.GSS_NT_SERVICE_NAME)

ictx = gss.gssContext()
itoken = gss.GSS_PY_NO_BUFFER
while (1):
    (cont_needed, otoken) = ictx.init(
        icred, target, gss.GSS_MECH_KRB5,
        gss.GSS_PY_MUTUAL_FLAG | gss.GSS_PY_CONF_FLAG | gss.GSS_PY_INTEG_FLAG,
        0, itoken)
    if not cont_needed: break
    common.send_msg(s, otoken)
    itoken = common.recv_msg(s)

print "\ncontext info"
print "-=-=-=-=-=-=-=-=-"
ctxinfo = ictx.inquire()
(src_name, targ_name, time_rec, mech_type, context_flags) = ctxinfo
print "initator: %s" % src_name.display()
print "acceptor: %s" % targ_name.display()
print "Context time: %d" % time_rec
print "mech: %s" % common.b2a(mech_type)
print "context flags: %04x" % context_flags

# wrap test

for message in holygrail.quotes.keys():
Example 24
    def run(self):
        while True:
            if not incoming_queue.empty():
                msg = incoming_queue.get()
                logging.debug(
                    'Processing message: {}\nCurrent recvd: {}\nCurrent status: {}\nCoord so far: {}\nPort to coord: {}'
                    .format(repr(msg), self.recd_reply, self.status,
                            self.coord_so_far, self.port_to_coord))
                logging.debug('recd_reply: {}'.format(self.recd_reply))
                if msg['type'] == 'start':
                    print('Activating in a second')
                    time.sleep(2.0)
                    self._heartbeat()
                elif msg['type'] == 'reconfig':
                    self.recd_reply[self.port_to(int(
                        msg['id']))] = 'no_contention'
                    self._on_reconfig(msg['node_list'], int(msg['frag_id']),
                                      int(msg['id']))
                elif msg['type'] == 'no_contention':
                    if self.status == 'wait':
                        self.recd_reply[self.port_to(int(
                            msg['id']))] = 'no_contention'
                        if len(self.recd_reply) == len(self._ports):
                            self._on_everybody_responded()
                elif msg['type'] == 'accept':
                    if self.status == 'wait':
                        self.recd_reply[self.port_to(int(
                            msg['id']))] = 'accepted'
                        if len(self.recd_reply) == len(self._ports):
                            self._on_everybody_responded()
                elif msg['type'] == 'stop':
                    if self.status == 'wait':
                        self._on_stop(int(msg['frag_id']), int(msg['id']))
                elif msg['type'] == 'fail':
                    # set status of the node
                    # TODO: This might be an issue
                    self.status = 'wait'
                    self.coord_so_far = self.id
                    self.port_to_coord = None  # What to put here???

                    # remove the failed edge from our MST
                    self.remove_edge(int(msg['id']))
                    # send reconfiguration request through all the ports
                    for dest_id in set(self._ports.values()).difference(
                        [int(msg['id'])]):
                        self._con.send(id=dest_id,
                                       msg=dict(type='reconfig',
                                                node_list=[self.id],
                                                frag_id=self.id))
                elif msg['type'] == 'extract':
                    host = msg['host']
                    port = msg['port']
                    graph_msg = encode(
                        dict(id=self.id,
                             links=list(self._ports.values()),
                             edges=[
                                 self._ports[port_id]
                                 for port_id in self._edges
                             ]))

                    send_msg(msg=graph_msg, host=host, port=port)
Example 25
    def run(self):
        while True:
            if not incoming_queue.empty():
                msg = incoming_queue.get()
                if msg['type'] == 'start':
                    time.sleep(2.0)
                    self._heartbeat()
                elif msg['type'] == 'reconfig':
                    sender_id = int(msg['id'])
                    frag_id = int(msg['frag_id'])
                    if self.status == 'idle':
                        self.status = 'wait'
                        failed_id = int(msg['failed_node'])
                        self.coord_so_far = sender_id
                        self.port_to_coord = self.port_to(sender_id)

                        can_communicate = list(
                            set(self._ports.values()).difference(
                                [sender_id, failed_id]))
                        if len(can_communicate) == 0:
                            self._con.send(sender_id,
                                           dict(type='no_contention'))
                        else:
                            for node_id in can_communicate:
                                self.recd_reply[node_id] = None
                                self._con.send(
                                    node_id,
                                    dict(type='reconfig',
                                         node_list=msg['node_list'] +
                                         [self.id],
                                         frag_id=msg['frag_id'],
                                         failed_node=failed_id))
                    else:
                        e = self.port_to(sender_id)

                        # Message is a copy of a message received earlier
                        if (frag_id == self.coord_so_far) and (
                                e not in self.get_port()):
                            logging.debug('Message was received earlier')
                            self._con.send(self._ports[e],
                                           dict(type='no_contention'))
                            continue

                        # Detected loop
                        if self.id in msg['node_list']:
                            logging.debug('Loop is detected')
                            self._con.send(self._ports[e],
                                           dict(type='no_contention'))
                            continue

                        # Resolve contention
                        if (self.coord_so_far > frag_id) or (
                            (self.coord_so_far == frag_id) and
                            (self.id > sender_id)):
                            logging.debug('Sending out stop message')
                            self._con.send(
                                sender_id,
                                dict(type='stop', frag_id=self.coord_so_far))
                        else:
                            self.coord_so_far = frag_id
                            if self.port_to_coord is not None:
                                logging.debug('Sending out stop message')
                                self._con.send(
                                    self._ports[self.port_to_coord],
                                    dict(type='stop', frag_id=frag_id))
                            self.port_to_coord = self.port_to(sender_id)
                elif msg['type'] == 'no_contention':
                    sender_id = int(msg['id'])
                    if sender_id in self.recd_reply.keys():
                        self.recd_reply[sender_id] = 'no_contention'
                    if not there_is_a_none(self.recd_reply):
                        self._on_everybody_responded()
                elif msg['type'] == 'accepted':
                    sender_id = int(msg['id'])
                    if sender_id in self.recd_reply.keys():
                        self.recd_reply[sender_id] = 'accepted'
                    if not there_is_a_none(self.recd_reply):
                        self._on_everybody_responded()
                elif msg['type'] == 'stop':
                    frag_id = msg['frag_id']
                    from_id = msg['id']
                    p = self.port_to(from_id)
                    if frag_id > self.coord_so_far:
                        self.coord_so_far = frag_id
                        if self.port_to_coord is not None:
                            self._con.send(self._ports[self.port_to_coord],
                                           dict(type='stop', frag_id=frag_id))
                        self.port_to_coord = p
                    if frag_id == self.coord_so_far:
                        if self.port_to_coord not in self.get_port():
                            if self.port_to_coord is not None:
                                self._con.send(self._ports[self.port_to_coord],
                                               dict(type='no_contention'))
                            self.recd_reply[
                                self.port_to_coord] = 'no_contention'
                            if not there_is_a_none(self.recd_reply):
                                self._on_everybody_responded()
                        else:
                            self._con.send(self._ports[self.port_to_coord],
                                           dict(type='stop', frag_id=frag_id))
                        self.port_to_coord = p
                    if frag_id < self.coord_so_far:
                        self._con.send(
                            self._ports[p],
                            dict(type='stop', frag_id=self.coord_so_far))
                elif msg['type'] == 'fail':
                    failed_node = int(msg['id'])
                    self.remove_edge(failed_node)
                    self.status = 'wait'
                    self.coord_so_far = self.id
                    self.port_to_coord = None
                    for node_id in set(self._ports.values()).difference(
                        [failed_node]):
                        self.recd_reply[node_id] = None
                        self._con.send(
                            node_id,
                            dict(type='reconfig',
                                 node_list=[self.id],
                                 frag_id=self.id,
                                 failed_node=failed_node))
                elif msg['type'] == 'extract':
                    host = msg['host']
                    port = msg['port']
                    graph_msg = encode(
                        dict(id=self.id,
                             links=list(self._ports.values()),
                             edges=[
                                 self._ports[port_id]
                                 for port_id in self._edges
                             ]))

                    send_msg(msg=graph_msg, host=host, port=port)
Example 26
import socket
import json
from common import send_msg, recv_msg

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()

port = 9999
s.connect((host, port))

msg = "abs"  #.encode('utf-8')

send_msg(s, msg)
data = recv_msg(s)

s.close()
print(data)
Example 27
def demux_run_with_docker(run_id, input_run_data_path, fastq_output_dir, samplesheet_csv_path, nb_mismatch, conf):
    """ Demultiplexing the run with bcl2fastq on version parameter with image Docker.

    Arguments:
        run_id: The run id
        input_run_data_path: input run data path to demultiplexing
        fastq_output_dir: fastq directory to save result on demultiplexing
        samplesheet_csv_path: samplesheet path in csv format, version used by bcl2fastq
        nb_mismatch: number of allowed mismatches in index matching
        conf: configuration dictionary
    """

    # In docker mount with input_run_data_path
    input_docker = '/data/input'
    input_run_data_path_in_docker = input_docker
    run_id_msg = " for run " + run_id + ' on ' + common.get_instrument_name(run_id, conf)

    # In docker mount with fastq_output_dir
    output_docker = '/data/output'
    fastq_data_path_in_docker = output_docker + '/' + os.path.basename(fastq_output_dir)

    tmp = conf[TMP_PATH_KEY]
    tmp_docker = '/tmp'

    bcl2fastq_log_file = tmp + "/bcl2fastq_output_" + run_id + ".err"
    samplesheet_csv_docker = tmp_docker + '/' + os.path.basename(samplesheet_csv_path)

    cmd = create_bcl2fastq_command_line(run_id, None, input_run_data_path_in_docker, fastq_data_path_in_docker,
                                        samplesheet_csv_docker, tmp_docker, nb_mismatch, conf)

    try:
        # Set the Docker working directory to the parent of the demultiplexing run directory.
        # The demultiplexing run directory itself will be created by bcl2fastq
        docker = DockerCommand(conf[Settings.DOCKER_URI_KEY], ['/bin/bash', '-c', cmd], 'bcl2fastq2', common.BCL2FASTQ2_VERSION)

        common.log("CONFIG", "Demultiplexing using docker image from " + docker.getImageDockerName() +
                   " with command line " + cmd, conf)

        common.log("CONFIG", "Bcl2fastq docker mount: " +
                   str(os.path.dirname(fastq_output_dir)) + ":" + str(output_docker) + "; " +
                   input_run_data_path + ":" + input_docker + "; " + tmp + ":" + tmp_docker, conf)

        # Mount input directory
        docker.addMountDirectory(input_run_data_path, input_docker)
        docker.addMountDirectory(os.path.dirname(fastq_output_dir), output_docker)
        docker.addMountDirectory(tmp, tmp_docker)

        docker.run()
        exit_code = docker.getExitValue()

        if exit_code != 0:
            error("Error while demultiplexing run " + run_id, 'Error while demultiplexing run (exit code: ' +
                  str(exit_code) + ').\nCommand line:\n' + cmd, conf)

            msg = 'Error while executing bcl2fastq ' + run_id_msg + ' (exit code: ' + str(
                  exit_code) + ').\nCommand line:\n' + cmd

            # Check if the log file has been generated
            if not os.path.exists(bcl2fastq_log_file):
                error("Error with bcl2fastq log for run " + run_id + ".", "No bcl2fastq log available " + bcl2fastq_log_file, conf)
                common.send_msg('[Aozan] Failed demultiplexing ' + run_id_msg, msg, True, conf)
            else:
                msg += "\n\nPlease check the attached bcl2fastq output error file."
                common.send_msg_with_attachment('[Aozan] Failed demultiplexing ' + run_id_msg, msg, bcl2fastq_log_file, True, conf)

            return False

    except Throwable, exp:
        error("Error while running Docker image", common.exception_msg(exp, conf), conf)
        return False

    return True
Example 28
#!/usr/bin/env python3
import argparse
from pprint import pprint

from common import load_config
from common import encode
from common import send_msg


def opt_parser():
    parser = argparse.ArgumentParser(
        description='Network reconfiguration node')
    parser.add_argument('--net_config',
                        default='config/sample_graph3.json',
                        type=str)
    return parser


if __name__ == '__main__':
    parser = opt_parser()
    opt = parser.parse_args()

    config = load_config(opt.net_config)
    pprint(config)

    start_msg = encode(dict(type="start"))
    for node in config.values():
        send_msg(start_msg, host=node['host'], port=node['port'])
        print(node)
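Note that this project's send_msg differs from the socket-based helper in the other examples: it takes host/port keyword arguments and opens its own connection. A minimal sketch of such a one-shot sender and of encode, both assumptions since the common module is not shown:

import json
import socket

def encode(obj):
    # Assumed: messages are JSON-encoded dicts, matching the dict(...) call sites
    return json.dumps(obj).encode('utf-8')

def send_msg(msg, host='localhost', port=9090):
    # One-shot sender: connect, deliver the payload, close the connection
    with socket.socket() as sock:
        sock.connect((host, port))
        sock.sendall(msg)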
Example 29
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

import socket

import sys
sys.path.append('..')

from common import send_msg, recv_msg

PORT = 9090

with socket.socket() as sock:
    sock.bind(('', PORT))
    sock.listen()

    print('Server: {}'.format(sock.getsockname()))

    while True:
        conn, addr = sock.accept()
        print('Connected:', addr)

        data = recv_msg(conn)
        print('Receiving ({}): {}'.format(len(data), data))

        print('Sending')
        send_msg(conn, data.upper())

        print('Close\n')
Example 30
import socket
import sys

sys.path.append('..')

from common import send_msg, recv_msg

HOST, PORT = "localhost", 9090

with socket.socket() as sock:
    sock.connect((HOST, PORT))

    # Send file
    with open('img.jpg', 'rb') as f:
        data = f.read()

    print('Sending {} bytes'.format(len(data)))
    print()

    send_msg(sock, data)

    print('Receiving')

    response_data = recv_msg(sock)
    print('Response {} bytes'.format(len(response_data)))

    file_name = 'img_thumbnail.jpg'
    print('Save in ' + file_name)

    with open(file_name, 'wb') as f:
        f.write(response_data)

    print('Close\n')
Example 31
def send_report(run_id, conf):
    """Send a mail with the first base report.

    Arguments:
        run_id: the run id
        conf: configuration dictionary
    """

    #
    # Retrieve features of the current run from the RunInfo.xml file
    #

    run_info = hiseq_run.get_run_info(run_id, conf)

    if run_info is None:
        return False

    # TODO: check the samplesheet if the demux step is enabled
    # add warning in report if useful

    reads = run_info.getReads()
    error_cycles_per_read_not_indexes_count = 0
    reads_indexed_count = 0
    reads_not_indexed_count = 0
    cycles_count = 0
    cycles_per_read_not_indexed = 0

    for read in reads:
        cycles_count += read.getNumberCycles()
        if read.isIndexedRead():
            reads_indexed_count += 1
        else:
            reads_not_indexed_count += 1
            if cycles_per_read_not_indexed == 0:
                cycles_per_read_not_indexed = read.getNumberCycles()

            # Check that the cycle count is the same for each non-indexed read
            error_cycles_per_read_not_indexes_count = cycles_per_read_not_indexed != read.getNumberCycles()

    # Identify the run type (SR or PE) according to data in RunInfo.xml
    if reads_not_indexed_count == 1:
        type_run_estimated = "SR-" + str(cycles_per_read_not_indexed) + " with " + str(
            reads_indexed_count) + " index"
        if reads_indexed_count > 1:
            type_run_estimated += "es"
    elif reads_not_indexed_count == 2:
        type_run_estimated = "PE-" + str(cycles_per_read_not_indexed) + " with " + str(
            reads_indexed_count) + " index"
        if reads_indexed_count > 1:
            type_run_estimated += "es"
    else:
        type_run_estimated = "Undetermined run type (" + str(reads_not_indexed_count) + " reads with " + str(
            reads_indexed_count) + " index"
        if reads_indexed_count > 1:
            type_run_estimated += "es"
        type_run_estimated += ")"

    description_run = "Informations about this run:\n"
    description_run += "\t- Sequencer: " + common.get_instrument_name(run_id, conf) + ".\n"
    description_run += "\t- " + str(run_info.getFlowCellLaneCount()) + " lanes with " + str(
        run_info.alignToPhix.size()) + " aligned to Phix.\n"
    description_run += "\t- " + str(reads_not_indexed_count) + " read"
    if reads_not_indexed_count > 1:
        description_run += "s"
    description_run += " and " + str(reads_indexed_count) + " index"
    if reads_indexed_count > 1:
        description_run += "es"
    description_run += ".\n"

    if error_cycles_per_read_not_indexes_count or cycles_per_read_not_indexed == 0:
        description_run += "\t- ERROR : cycles count per read different between reads (" + str(
            cycles_count) + " total cycles).\n"
    else:
        description_run += "\t- " + str(cycles_per_read_not_indexed) + " cycles per read (" + str(
            cycles_count) + " total cycles).\n"

    description_run += "\t- Estimated run type: " + type_run_estimated + ".\n"

    attachment_file = str(hiseq_run.find_hiseq_run_path(run_id, conf)) + '/' + run_id + '/' + common.FIRST_BASE_REPORT_FILE

    # If the First base report file exists, send it by email
    if common.is_file_readable(attachment_file):

        message = 'You will find attached to this message the first base report for the run ' + \
                  run_id + '.\n\n' + description_run
        common.send_msg_with_attachment('[Aozan] First base report for the run ' + type_run_estimated + ' ' + run_id +
                                        ' on ' + common.get_instrument_name(run_id, conf),
                                        message, attachment_file, False, conf)
    else:
        # Otherwise send the message without an attachment
        message = 'You will find below the parameters of the run ' + run_id + '.\n\n' + description_run
        common.send_msg('[Aozan] New run ' + type_run_estimated + ' ' + run_id + ' on ' +
                        common.get_instrument_name(run_id, conf), message,
                        False, conf)

    return True
Example 32
def recompress(run_id, conf):
    """Proceed to recompression of a run.

    Arguments:
        run_id: The run id
        conf: configuration dictionary
    """

    common.log('INFO', 'Recompress step: Starting', conf)

    # Check if input root fastq root data exists
    if not common.is_dir_exists(FASTQ_DATA_PATH_KEY, conf):
        error("FASTQ data directory does not exist",
              "FASTQ data directory does not exist: " + conf[FASTQ_DATA_PATH_KEY], conf)
        return False

    start_time = time.time()
    fastq_input_dir = conf[FASTQ_DATA_PATH_KEY] + '/' + run_id

    # initial du for comparing with ending disk usage
    previous_du_in_bytes = common.du(fastq_input_dir)

    # get information about compression type
    compression_type = conf[RECOMPRESS_COMPRESSION_KEY]
    compression_level = conf[RECOMPRESS_COMPRESSION_LEVEL_KEY]
    compression_info_tuple = get_info_from_file_type(compression_type, compression_level)

    if compression_info_tuple is None:
        error("Unknown compression type",
              "Unknown compression type: " + compression_type, conf)
        return False

    (compression_type_result, output_file_extension, output_compression_command, output_decompression_command,
     compression_level_argument) = compression_info_tuple

    # The following list contains the processed type of files to recompress
    types_to_recompress = ["fastq.gz", "fastq"]

    # list of programs whose presence in PATH is checked before execution
    program_set = {"bash", "tee", "touch", "chmod", "md5sum", output_compression_command, output_decompression_command}

    # get list of file to process
    input_files = []
    for extension in types_to_recompress:

        input_files.extend(list_files(fastq_input_dir, extension))
        simple_extension = os.path.splitext(extension)[-1][1:]
        extension_info_tuple = get_info_from_file_type(simple_extension)

        if extension_info_tuple is None:
            error("Unknown extension type",
                  "Unknown extension type: " + extension, conf)
            return False

        program_set.add(extension_info_tuple[3])

    # actual program list check
    for program in program_set:
        if not common.exists_in_path(program):
            error("Can't find all needed commands in PATH env var",
                  "Can't find all needed commands in PATH env var. Unable to find: " + program + " command.", conf)
            return False

    # Create an executor for parallel processing
    executor = Executors.newFixedThreadPool(int(conf[RECOMPRESS_THREADS_KEY]))
    workers = []

    # process each fastq and fastq.gz recursively in each fastq directory
    for input_file in input_files:

        simple_extension = os.path.splitext(input_file)[-1][1:]

        # get info about the type of input file
        extension_info_tuple = get_info_from_file_type(simple_extension)
        if extension_info_tuple is None:
            error("Unknown extension type",
                  "Unknown extension type: " + simple_extension, conf)
            return False

        input_decompression_command = extension_info_tuple[3]

        # Get the file base name and build the output_file name; a file that is already .fastq is used directly as base_input_file
        base_input_file = input_file[0: input_file.index(".fastq") + 6]
        output_file = base_input_file + "." + output_file_extension

        # Skip if the output_file already exists
        if not os.path.exists(output_file):

            # Create worker then execute in thread
            worker = Worker(input_file, output_file, input_decompression_command, output_compression_command,
                            output_decompression_command,
                            compression_level_argument,
                            common.is_conf_value_equals_true(RECOMPRESS_DELETE_ORIGINAL_FASTQ_KEY, conf))
            workers.append(worker)
            executor.execute(worker)

        else:
            common.log("WARNING", "Recompress step: Omitting processing file " + input_file + ". The associated output file " + output_file + " already exists.", conf)

    # Wait for all threads to finish
    executor.shutdown()
    while not executor.isTerminated():
        time.sleep(1)

    # Check if any worker is in error
    for worker in workers:
        if not worker.is_successful():
            error(worker.get_error_message(),
                  worker.get_long_error_message(), conf)
            return False

    # check new disk usage
    df_in_bytes = common.df(fastq_input_dir)
    du_in_bytes = common.du(fastq_input_dir)
    previous_du = previous_du_in_bytes / (1024 * 1024)
    df = df_in_bytes / (1024 * 1024 * 1024)
    du = du_in_bytes / (1024 * 1024)

    common.log("WARNING", "Recompress step: output disk free after step: " + str(df_in_bytes), conf)
    common.log("WARNING", "Recompress step: space previously used: " + str(previous_du_in_bytes), conf)
    common.log("WARNING", "Recompress step: space now used by step: " + str(du_in_bytes), conf)

    duration = time.time() - start_time

    msg = 'Ending recompression for run ' + run_id + '.' + \
          '\nJob finished at ' + common.time_to_human_readable(time.time()) + \
          ' without error in ' + common.duration_to_human_readable(duration) + '. '

    msg += '\n\nAfter recompress step FASTQ folder is now %.2f MB (previously %.2f MB) and %.2f GB still free.' % (
        du, previous_du, df)

    common.send_msg('[Aozan] Ending recompress for run ' + run_id + ' on ' +
                    common.get_instrument_name(run_id, conf), msg, False, conf)
    common.log('INFO', 'Recompress step: successful in ' + common.duration_to_human_readable(duration), conf)
    return True