import concurrent.futures
import logging
import os
import threading
import time

# Dev, process_dev, write_row, get_time_stamp, and util come from the
# surrounding project and are not shown on this page.

def main():
    # make more specific if running multiple tests, on multiple machines, etc.
    TEST_DESC = 'Testing disks.'
    FORM_FACTOR = 'Unknown'

    # log and lock
    log_format = '%(asctime)s: %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO, datefmt='%H:%M:%S')
    # logging.getLogger().setLevel(logging.DEBUG)
    lock = threading.Lock()
    now_unix = int(time.time())
    devs_file_path = './output/devs.csv'
    badblocks_file_path = './output/badblocks.csv'
    test_file_path = './output/test.csv'
    smart_file_path = './output/smart.csv'

    if util.is_prod():
        for path in (devs_file_path, badblocks_file_path, test_file_path,
                     smart_file_path):
            assert not os.path.exists(
                path
            ), f'An output file already exists. Delete before proceeding: {path}'

    # write test description row
    write_row(
        lock, test_file_path, {
            'create_time': now_unix,
            'time_stamp': get_time_stamp(),
            'test_desc': TEST_DESC,
            'form_factor': FORM_FACTOR
        })

    # devices
    dev_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    devs = [Dev('sd' + dl, now_unix) for dl in dev_letters]

    if util.is_prod():
        with concurrent.futures.ThreadPoolExecutor(
                max_workers=len(devs)) as executor:
            for dev in devs:
                executor.submit(process_dev, dev, lock, devs_file_path,
                                badblocks_file_path, smart_file_path)
    else:
        for dev in devs:
            process_dev(dev, lock, devs_file_path, badblocks_file_path,
                        smart_file_path)
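Every example on this page gates real device access behind an is_prod() check. A minimal sketch of what util.is_prod() could look like, assuming an environment-variable convention (the variable name is hypothetical; the real util module isn't shown here):

# util.py -- hypothetical sketch, not the project's actual implementation.
import os

def is_prod():
    # Treat the run as production only when DISK_TEST_ENV is explicitly 'prod';
    # any other value falls back to dev mode, which reads sample files instead
    # of touching real hardware.
    return os.environ.get('DISK_TEST_ENV') == 'prod'

Note that Examples #2 and #4 call a different is_prod(request) that takes a request object; that web-app variant is a separate helper from this one.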
Example #2
def clear_data():
    resp = Response(dumps({"success": True}), content_type="application/json")
    if is_prod(request):
        set_cookies(resp, {})
    else:
        session.clear()
    return resp
Example #4
def json_response(data: dict,
                  status=200,
                  headers=None,
                  cookies=None) -> Response:
    dump = dumps(data)
    resp = Response(dump, status, headers, content_type="application/json")
    if cookies:
        if is_prod(request):
            sess = get_session(request)
            sess.update(cookies)
            set_cookies(resp, sess)
        else:
            # dev environment, do a quick setup
            for k, v in cookies.items():
                session[k] = v
    return resp
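A hedged usage sketch: json_response appears designed to be called from inside a Flask request context, e.g. in a route handler (the route and cookie key below are made up for illustration):

@app.route('/profile')
def profile():
    # Returns JSON and persists a session value. In prod the merged session is
    # written back to the client via set_cookies(); in dev it goes straight
    # into the server-side session.
    return json_response({'success': True}, cookies={'last_page': 'profile'})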
Example #5
def check_for_bad_blocks(log_file_path):
    if util.is_prod():
        # true/false for any bad blocks (normally false)
        bad_blocks = {'HasBadBlocks': os.path.getsize(log_file_path) > 0}

        # specific number of bad blocks (normally 0)
        # https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s07.html
        with open(log_file_path, 'r') as log_file:
            bad_blocks['NumBadBlocks'] = len(log_file.readlines())

        return bad_blocks
    else:
        # dev mode: canned values (HasBadBlocks is hard-coded True here)
        return {'HasBadBlocks': True, 'NumBadBlocks': 0}
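readlines() materializes every line of the log in memory, which is fine for a normally-empty badblocks log. For a log that could grow large, a lazier count is possible (a sketch, not part of the original):

with open(log_file_path, 'r') as log_file:
    # Iterating the file object counts lines without buffering them all.
    num_bad_blocks = sum(1 for _ in log_file)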
Example #6
def get_attributes(dev_path):
    ATA_SMART_ATTRIBUTES = 'ata_smart_attributes'
    TABLE = 'table'
    NAME = 'name'
    RAW = 'raw'
    VALUE = 'value'

    # either get the real output, or use a sample file
    attr_str = ''
    if util.is_prod():
        attr_str = subprocess.check_output(['smartctl', '-jA',
                                            dev_path]).decode()
    else:
        with open('input-sample/smart_attributes.json', 'r') as sample_file:
            attr_str = sample_file.read()

    # parse json
    attr = json.loads(attr_str)
    attr_table = attr[ATA_SMART_ATTRIBUTES][TABLE]
    # print(attr_table)

    # https://www.ixsystems.com/community/resources/hard-drive-burn-in-testing.92/
    IMPORTANT_ATTRIBUTES = [
        'Reallocated_Sector_Ct', 'Current_Pending_Sector',
        'Offline_Uncorrectable'
    ]

    attrs = {
        d[NAME]: d[RAW][VALUE]
        for d in attr_table if d[NAME] in IMPORTANT_ATTRIBUTES
    }

    attr_error_count = sum(attrs.values())

    attr_has_errors = attr_error_count != 0

    # Originally tried to return the raw attributes themselves, but that didn't
    # work well because some SSDs don't report any of these attributes. Counting
    # turned out to be the better strategy: if an attribute is present, count its
    # errors; if it's absent, it simply contributes nothing.
    attr_return = {
        'attr_has_errors': attr_has_errors,
        'attr_error_count': attr_error_count
    }

    return attr_return
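For illustration, given a made-up attribute table shaped like smartctl -jA output, the comprehension above reduces it as follows:

# Hypothetical sample data, not real smartctl output.
attr_table = [
    {'name': 'Reallocated_Sector_Ct', 'raw': {'value': 0}},
    {'name': 'Power_On_Hours', 'raw': {'value': 9143}},  # skipped: not in IMPORTANT_ATTRIBUTES
    {'name': 'Current_Pending_Sector', 'raw': {'value': 2}},
]
# -> attrs == {'Reallocated_Sector_Ct': 0, 'Current_Pending_Sector': 2}
# -> attr_error_count == 2, attr_has_errors == True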
Example #7
def get_health(dev_path):
    SMART_STATUS = 'smart_status'
    PASSED = 'passed'

    # either get the real output, or use a sample file
    health_str = ''
    if util.is_prod():
        health_str = subprocess.check_output(['smartctl', '-jH',
                                              dev_path]).decode()
    else:
        with open('input-sample/smart_health_dirty.json', 'r') as sample_file:
            health_str = sample_file.read()

    # parse json
    health = json.loads(health_str)

    return {'smart_health_passed': health[SMART_STATUS][PASSED]}
Example #8
def run_badblocks(dev_path, log_file_path):
    # run badblocks
    before = int(time.time())
    if util.is_prod():
        # -w runs badblocks' destructive write-mode test: all data on dev_path
        # is overwritten. -s shows progress, -v is verbose, -o logs bad blocks.
        subprocess.run(['badblocks', '-wsv', '-o', log_file_path, dev_path])
    else:
        time.sleep(2)
    after = int(time.time())
    elapsed = after - before

    # collect output
    bb_runtime = {
        'begin_time': before,
        'end_time': after,
        'elapsed': elapsed,
    }

    return bb_runtime
Example #9
def get_info(path):
    # hard-coding based on the output format of lsblk
    PARENT_KEY = 'blockdevices'
    DEV_INDEX = 0

    info_str = ''
    if util.is_prod():
        # lsblk flags: -O (all columns), -J (JSON output), -b (sizes in bytes)
        info_str = subprocess.check_output(['lsblk', '-OJb', path]).decode()
    else:
        with open('input-sample/lsblk.json', 'r') as sample_file:
            info_str = sample_file.read()

    # parse output to dictionary
    info_json = json.loads(info_str)
    info = info_json[PARENT_KEY][DEV_INDEX]

    # Building a new "clean" dict via a comprehension (see the sketch below)
    # would also work, but fundamentally this is an in-place update, so a plain
    # loop is used here.
    for key, val in info.items():
        info[key] = val.strip() if isinstance(val, str) else val

    return info
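The comprehension alternative mentioned in the comment would look like this; it produces the same values but builds a new dict instead of updating in place:

info = {
    key: (val.strip() if isinstance(val, str) else val)
    for key, val in info.items()
}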
Example #10
def run_test(dev_path):
    # kick off the SMART extended (long) self-test; no-op in dev mode
    if util.is_prod():
        subprocess.run(['smartctl', '-t', 'long', dev_path])
Example #11
def get_capabilities(dev_path):
    POLL_FREQ = 60 if util.is_prod() else 2
    ATA_SMART_DATA = 'ata_smart_data'
    OFFLINE_DATA_COLLECTION = 'offline_data_collection'
    SELF_TEST = 'self_test'
    STATUS = 'status'
    POLLING_MINUTES = 'polling_minutes'
    EXTENDED = 'extended'
    STRING = 'string'

    # Broken out into a helper because the status has to be re-read from the
    # device every POLL_FREQ seconds.
    def get_ata_cap(dev_path):
        # either get the real output, or use a sample file
        cap_str = ''
        if util.is_prod():
            cap_str = subprocess.check_output(['smartctl', '-jc',
                                               dev_path]).decode()
        else:
            with open('input-sample/smart_during_test_clean.json',
                      'r') as sample_file:
                cap_str = sample_file.read()

        # parse output to dictionary
        info_json = json.loads(cap_str)
        ata = info_json[ATA_SMART_DATA]

        return ata

    # determine how long the extended test should take to run
    polling_minutes = get_ata_cap(
        dev_path)[SELF_TEST][POLLING_MINUTES][EXTENDED]

    # wait for the extended self test to complete
    elapsed = 0
    self_test_status = ''
    while True:
        time.sleep(POLL_FREQ)
        elapsed += POLL_FREQ
        self_test_status = get_ata_cap(dev_path)[SELF_TEST][STATUS][STRING]
        if re.match('in progress', self_test_status):
            print(
                f'Waiting for {dev_path} extended test to complete. {elapsed / 60}m of estimated {polling_minutes}m'
            )
        else:
            break

    # wait for the offline data collection to complete (unlikely to still be
    # running, but poll anyway to be safe)
    elapsed = 0
    offline_collection_status = ''
    while True:
        time.sleep(POLL_FREQ)
        elapsed += POLL_FREQ
        offline_collection_status = get_ata_cap(
            dev_path)[OFFLINE_DATA_COLLECTION][STATUS][STRING]
        if re.match('in progress', offline_collection_status):
            print(
                f'Waiting for {dev_path} offline collection to complete. {elapsed / 60}m of unknown minutes.'
            )
        else:
            break

    # collect results
    cap = {
        'polling_minutes': polling_minutes,
        'self_test_status': self_test_status,
        'offline_collection_status': offline_collection_status,
    }

    return cap
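The two polling loops above are nearly identical; a sketch of a shared helper both could call, assuming the same smartctl "in progress" status strings (the helper name is illustrative, not from the original project):

import re
import time

def wait_while_in_progress(dev_path, get_status, label, poll_freq):
    # Poll get_status(dev_path) every poll_freq seconds until the status string
    # no longer starts with 'in progress'; return the final status.
    elapsed = 0
    while True:
        time.sleep(poll_freq)
        elapsed += poll_freq
        status = get_status(dev_path)
        if not re.match('in progress', status):
            return status
        print(f'Waiting for {dev_path} {label} to complete. {elapsed / 60}m elapsed.')

# Example call, reusing the nested get_ata_cap from above:
# self_test_status = wait_while_in_progress(
#     dev_path, lambda p: get_ata_cap(p)[SELF_TEST][STATUS][STRING],
#     'extended test', POLL_FREQ)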