Example #1
def descargarManga(codigoManga = None, parametros = ParamDescarga):
    log.debug(codigoManga)
    manga = config.mangas[codigoManga]
    lstExclusions = exclusionFiles(manga)   
    log.info(" exclusions.txt == %s" % lstExclusions) 
    MangaGet.lstCapitulos(manga, parametros)
    listCapitulos = []
    # TODO: I still need to keep working on the exclusion handling; it is not right yet
    for capitulo in manga.capitulos:
        if capitulo.code not in lstExclusions:
            listCapitulos.append(capitulo)
    fileTime =  time.strftime("%Y%m%d")       
    fileDownload = MangaFile.getMangaDownloadFolder(manga.uCode, "t%s_%s"%(fileTime, config.CONST_DOWNLOAD_FILE))  
    for capitulo in listCapitulos:        
        MangaFile.crearDirectorio(capitulo, manga)
        capitulo = MangaGet.lstImagenes(manga, capitulo)
        totalImgCarpeta = MangaFile.totalArchivosCarpeta(capitulo)
        if(capitulo.length > totalImgCarpeta):
            log.debug("Descargando Imágenes del capítulo :: %s" % capitulo.code)
            with open(fileDownload, 'a') as file_:
                file_.write("====== Resumen C%s ====== \n" % capitulo.code)
            descargarImagenesCapitulo(manga, capitulo, fileDownload)
            totalImgCarpeta = MangaFile.totalArchivosCarpeta(capitulo)
            with open(fileDownload, 'a') as file_:
                file_.write("C%s \t Total:%s \t Descargados:%s \n" % (capitulo.code, capitulo.length, totalImgCarpeta))
        else:
            log.error("Todos los archivos del capitulo %s ya han sido descargados"%capitulo.title)  
        
    return manga
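For context, a minimal sketch of how this routine might be driven from a command-line wrapper; the wrapper itself and the way the manga code is passed are assumptions, not part of the original module.

# Hypothetical driver for descargarManga (names here are assumptions).
import sys

def main():
    codigo = sys.argv[1]             # must be a key in config.mangas
    manga = descargarManga(codigo)   # relies on the default ParamDescarga parameters
    log.info("done: %s" % manga.uCode)

if __name__ == '__main__':
    main()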
Example #2
def exec_test1(config_data):
    add_test_info = AddTestInfo(
        15.1,
        "\napi/v2/cluster/<fsid>/server\n"
        "\napi/v2/cluster/<fsid>/server/<fqdn>\n",
    )

    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.url)

        ids = [k["fqdn"] for k in cleaned_response]

        get_server_by_ids = lambda x: test.get(test.url + "/" + x)

        list(map(get_server_by_ids, ids))

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #3
def exec_test(config_data):

    add_test_info = AddTestInfo(12, "\napi/v2/cluster/<fsid>/osd_config \n")
    add_test_info.started_info()

    try:

        config_ops = Test(**config_data)

        config_ops.get(config_ops.osd_config_url)

        patch = lambda data: config_ops.patch(config_ops.osd_config_url, data)

        data1 = {"nodeep-scrub": True, "nobackfill": True}

        patch(data1)

        data2 = {"nodeep-scrub": False, "nobackfill": False, "noscrub": False}

        patch(data2)

        config_ops.get(config_ops.osd_config_url)

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #4
def exec_test(config_data):

    add_test_info = AddTestInfo(
        11.1, "\naapi/v2/cluster/<fsid>/osd \n"
        "api/v2/cluster/<fsid>/osd/<osd_id> \n")

    add_test_info.started_info()

    try:
        osd_ops = Test(**config_data)

        contents = osd_ops.get(osd_ops.osd_url)

        osd_ids = [ids["id"] for ids in contents]

        patch = lambda data: [
            osd_ops.patch(osd_ops.osd_url + "/" + str(id), data)
            for id in osd_ids
        ]

        data1 = {"up": False}

        patch(data1)

        data2 = {"up": True}

        patch(data2)

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #5
def exec_test(config_data):
    add_test_info = AddTestInfo(
        10,
        "\napi/v2/cluster/<fsid>/mon\n"
        "api/v2/cluster/<fsid>/mon/<mon_id>\n"
        "api/v2/cluster/<fsid>/mon/<mon_id>/status\n",
    )

    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.url)

        mon_ids = [mon["name"] for mon in cleaned_response]

        [test.get(test.url + "/" + mon_id) for mon_id in mon_ids]

        [test.get(test.url + "/" + mon_id + "/status") for mon_id in mon_ids]

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #6
def exec_test2(config_data):
    add_test_info = AddTestInfo(
        15.2, '\napi/v2/server\n'
        'api/v2/server/<fqdn>\n'
        'api/v2/server/<fqdn>/grains')

    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.url2)

        fqdns = [k['fqdn'] for k in cleaned_response]

        get_server_by_ids = lambda x: test.get(test.url + "/" + x)

        list(map(get_server_by_ids, fqdns))

        get_server_by_grains = lambda x: test.get(test.url + "/" + x + "/" +
                                                  "grains")

        list(map(get_server_by_grains, fqdns))

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #7
def exec_test(config_data):
    add_test_info = AddTestInfo(7, '\napi/v2/event\n'
                                   'api/v2/cluster/<fsid>/event\n'
                                   'api/v2/server/<fqdn>/event\n')
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        test.get(test.event_url)
        [test.get(test.event_url + '?severity=' + x) for x in test.severity]

        test.get(test.cluster_event_url)
        [test.get(test.cluster_event_url + '?severity=' + x) for x in test.severity]

        cleaned_response = test.get(test.server_event_url)
        servers = [server['fqdn'] for server in cleaned_response]
        [test.get(test.server_event_url + "/" + server) for server in servers]
        for server in servers:
            [test.get(test.server_event_url + "/" + server + '?severity=' + x) for x in test.severity]

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #8
def totalArchivosCarpeta(capitulo = Capitulo):
    files = []
    total = 0
    try:
        files = os.listdir(capitulo.folder)
    except OSError:
        log.error("No existe la carpeta %s"%capitulo.folder)
    finally:
        total = len(files)
    return total
Example #9
    def get(self, url):

        try:

            response = self.http_request.get(url)

            cleaned_response = clean_response(response)

            return cleaned_response

        except Exception:
            log.error('\n%s' % traceback.format_exc())
            raise AssertionError
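The helper above relies on clean_response, which is not shown in this example. As a rough, hypothetical sketch of what such a helper usually does in these tests (assuming a requests-style response object; this is not the project's actual implementation), it checks the HTTP status and returns the parsed JSON body:

# Hypothetical clean_response sketch (assumes a requests-style response object).
import json

def clean_response(response):
    # Fail the calling test if the API call did not succeed.
    assert response.status_code in (200, 201, 202), \
        "unexpected status %s: %s" % (response.status_code, response.text)
    # Hand back the decoded JSON payload for further checks.
    return json.loads(response.text)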
Example #10
def exec_test(config_data):

    add_test_info = AddTestInfo(5, '\napi/v2/cluster/<fsid>/crush_rule_set\n')
    add_test_info.started_info()

    try:

        test = Test(**config_data)
        test.get(test.url)
        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #11
def exec_test(config_data):

    add_test_info = AddTestInfo(3, "api/v2/cluster/<fsid>/crush_map")
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        test.get(test.url)

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #12
def exec_test(config_data):

    add_test_info = AddTestInfo(8, 'api/v2/info')
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        test.get(test.url)

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #13
    def delete(self, url, request_api=True):

        try:

            response = self.http_request.delete(url)

            cleaned_response = clean_response(response)

            if request_api:
                check_request_id(self.api_request, cleaned_response['request_id'])

            return cleaned_response

        except Exception:
            log.error('\n%s' % traceback.format_exc())
            raise AssertionError
Example #14
    def patch(self, url, data, request_api=True):

        try:

            log.info('data to patch\n %s' % data)

            response = self.http_request.patch(url, data)

            cleaned_response = clean_response(response)

            if request_api:
                check_request_id(self.api_request, cleaned_response['request_id'])

            return cleaned_response

        except Exception:
            log.error('\n%s' % traceback.format_exc())
            raise AssertionError
Example #15
def exec_test(config_data):

    add_test_info = AddTestInfo(
        16, '\napi/v2/cluster/<fsid>/sync_object \n'
        'api/v2/cluster/<fsid>/sync_object/<sync_type>')
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.url)

        [test.get(test.url + "/" + k) for k in cleaned_response]

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #16
    def post(self, url, data, request_api=True):

        try:

            log.info("data to post:\n%s" % data)

            response = self.http_request.post(url, data)

            cleaned_response = clean_response(response)

            if request_api:

                check_request_id(self.api_request,
                                 cleaned_response["request_id"])

            return cleaned_response

        except Exception:
            log.error("\n%s" % traceback.format_exc())
            raise AssertionError
Example #17
def exec_test(config_data):

    add_test_info = AddTestInfo(14, "\napi/v2/key\n" "api/v2/key/<minion_id>")
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.url)

        ids = [k["id"] for k in cleaned_response]

        get_minion_id = lambda x: test.get(test.url + "/" + x)

        list(map(get_minion_id, ids))

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #18
def exec_test2(config_data):

    add_test_info = AddTestInfo(
        11.2,
        "\napi/v2/cluster/<fsid>/osd/command \n"
        "api/v2/cluster/<fsid>/osd/<osd_id>/command \n"
        "api/v2/cluster/<fsid>/osd/<osd_id>/command/<command>\n",
    )
    add_test_info.started_info()

    try:
        osd_ops = Test(**config_data)

        contents = osd_ops.get(osd_ops.osd_url)

        osd_ids = [ids["id"] for ids in contents]

        [
            osd_ops.get(osd_ops.osd_url + "/" + str(id) + "/command")
            for id in osd_ids
        ]

        commands = osd_ops.get(osd_ops.osd_url + "/command")

        data = {"verify": False}

        [
            osd_ops.post(
                osd_ops.osd_url + "/" + str(osd_id) + "/" + "command" + "/" +
                command,
                data,
            ) for osd_id in osd_ids for command in commands
        ]

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #19
def exec_test(config_data):
    add_test_info = AddTestInfo(
        6, '\napi/v2/cluster/fsid/crush_type \n'
        'api/v2/cluster/fsid/crush_type/<type_id>')
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.config_url)

        keys = [key['id'] for key in cleaned_response]

        get = lambda x: test.get(test.config_url + "/" + str(x))

        list(map(get, keys))

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #20
def exec_test(config_data):

    add_test_info = AddTestInfo(
        9,
        "\napi/v2/cluster/<fsid>/log\n"
        "api/v2/server/<fqdn>/log\n"
        "api/v2/server/<fqdn>/log/<log_path>",
    )
    add_test_info.started_info()

    try:

        test = Test(**config_data)

        test.get(test.log_url)

        cleaned_response = test.get(test.server_log_url)
        fqdns = [fqdn["fqdn"] for fqdn in cleaned_response]

        cleaned_response = [
            test.get(test.server_log_url + "/" + fqdn + "/log")
            for fqdn in fqdns
        ]

        ignores = ["lastlog", "wtmp"]

        log_paths = [x for x in cleaned_response if x not in ignores]

        [
            test.get(test.server_log_url + "/" + fqdn + "/log" + "/" + path)
            for fqdn in fqdns for path in log_paths
        ]

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #21
def descargarArchivo(imagen = Imagen, capitulo = Capitulo, manga = Manga, fileDownload = None):
    estado = False
    try:
        filename = imagen.urlReal.split("/")[-1]
        filename = funciones.agregaCeros(filename, config.CONST_CANTIDAD_CERO_IMG)
        filePath = '%s/%s' %(capitulo.folder, filename)
        if(not os.path.isfile(filePath)):
            log.info('curl %s -o %s/%s'%( imagen.urlReal, capitulo.folder, filename))
            os.system('curl %s -o %s/%s' % (imagen.urlReal, capitulo.folder, filename))        
        else:
            log.error('El archivo [%s] ya existe'% filename)
        imagen.path = filePath
        estado = True
    except Exception:
        log.error("No se pudo descargar la img %s"%imagen.code)
        estado = False
    finally:
            
        if not estado:
            with open(fileDownload, 'a') as file_:
                file_.write("%s \t C%s I%s \t %s \n" % (estado, capitulo.code, imagen.code, imagen.urlReal))
    return imagen
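The download above shells out to curl through os.system and interpolates the URL directly into the command string. A safer variant, sketched here under the assumption that curl stays the transport, passes an argument list to subprocess so no shell quoting is involved:

# Sketch: the same curl download without going through a shell.
import subprocess

def descargar_con_curl(url, destino):
    # Equivalent to: curl <url> -o <destino>; check=True raises CalledProcessError on failure.
    subprocess.run(["curl", url, "-o", destino], check=True)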
Example #22
def exec_test(config_data):

    add_test_info = AddTestInfo(
        2, "\napi/v2/cluster/fsid/config \n"
        "api/v2/cluster/fsid/config/<key>")
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        cleaned_response = test.get(test.config_url)

        keys = [key["key"] for key in cleaned_response]

        get = lambda x: test.get(test.config_url + "/" + str(x))

        list(map(get, keys))

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #23
    def run(self):
        log.debug('watcher thread init')

        while self.alive:

            self.lock.acquire()
            try:
                #your queue
                c = self.rds.lpop('queue:list')
                if c:
                    log.debug('get key:%s' % c)
                    #do something
            except Exception:
                excinfo = traceback.format_exc()
                log.error('thread %d get error: %s' % (self.channel, excinfo))
            finally:
                # always release, even when the queue was empty or an error occurred
                self.lock.release()

            time.sleep(0.1)

        log.debug('thread %d leaving...' % self.channel)
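The run method above expects a thread object carrying alive, lock, rds (a Redis client) and channel attributes. A minimal sketch of how such a worker could be wired up, with a hypothetical constructor and defaults:

# Hypothetical wiring for the watcher thread (constructor and defaults are assumptions).
import threading
import redis

class Watcher(threading.Thread):
    def __init__(self, channel, lock):
        super(Watcher, self).__init__()
        self.channel = channel    # numeric id used in the log messages
        self.lock = lock          # lock shared between watcher threads
        self.rds = redis.Redis()  # assumed: local Redis on the default port
        self.alive = True         # flip to False from outside to stop the loop

    # run() as shown in the example above goes here.

lock = threading.Lock()
watchers = [Watcher(i, lock) for i in range(2)]
for w in watchers:
    w.start()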
Example #24
    def error_handler_500(e):
        exception = {}

        hash = hashlib.sha256()
        hash.update(os.urandom(8))
        exception["id"] = hash.hexdigest()

        exception["value"], exception["type"], exception["traceback"] = sys.exc_info()
        exception["traceback"] = traceback.extract_tb(exception["traceback"])

        log.error("An unhandled exception occurred:")
        log.error("ID:        " + exception["id"])
        log.error("Type:      " + repr(exception["type"]))
        log.error("Value:     " + repr(exception["value"]))
        log.error("Traceback: ")

        for Frame in exception["traceback"]:
            log.error(str(Frame))

        return flask.render_template("/error/500.html", exception=exception), 500
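Since the handler is defined inside another scope, it still has to be attached to the Flask application; a minimal sketch of that wiring, assuming the handler is reachable where the app is created:

# Sketch: hooking the handler into Flask (the app name is an assumption).
import flask

app = flask.Flask(__name__)

# Either decorate the handler with @app.errorhandler(500) where it is defined,
# or register it explicitly once the app exists:
app.register_error_handler(500, error_handler_500)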
Example #25
def _create_elections(sid):
    # Step, er, 0: Update the request cache first, so elections have the most recent data to work with
    # (the entire requests module depends on its caches)
    request.update_cache(sid)

    max_sched_id, max_elec_id, num_elections = _get_schedule_stats(sid)
    log.debug(
        "create_elec",
        "Max sched ID: %s // Max elec ID: %s // Num elections already existing: %s // Size of next: %s"
        % (max_sched_id, max_elec_id, num_elections, len(next[sid])))

    # Step 2: Load up any elections that have been added while we've been idle and append them to the list
    unused_elec_id = db.c.fetch_list(
        "SELECT elec_id FROM r4_elections WHERE sid = %s AND elec_id > %s AND elec_used = FALSE AND elec_priority = FALSE ORDER BY elec_id",
        (sid, max_elec_id))
    unused_elecs = []
    for elec_id in unused_elec_id:
        unused_elecs.append(event.Election.load_by_id(elec_id))

    # Step 3: Insert elections where there's time and adjust predicted start times as necessary, if num_elections < 2 then create them where necessary
    i = 1
    running_time = current[sid].start_actual + current[sid].length()
    if len(next[sid]) > 0:
        next[sid][0].start = running_time
    while i < len(next[sid]):
        next_start = next[sid][i].start
        gap = next_start - running_time
        next_elec_i = None
        next_elec_length = playlist.get_average_song_length(sid)
        j = i
        while j < len(next[sid]):
            if next[sid][j].is_election:
                next_elec_i = j
                next_elec_length = next[sid][j].length()
                break
            j += 1
        if not next_elec_i and len(unused_elecs) > 0:
            next_elec_length = unused_elecs[0].length()

        # TODO: This algorithm DEFINITELY needs code/concept review
        # There are potential holes - it is not as comprehensive a scheduler as the previous
        # Rainwave scheduler, however it is vastly simplified.
        # One drawback is that you cannot schedule elections themselves to run at certain times.

        create_elecs = False
        # If the event we're looking at collides with the previous event, adjust this event to start later
        if gap <= 0:
            next[sid][i].start = running_time
            running_time += next[sid][i].length()
        # If we have no elections current in the next list and there's enough time to fit a song, stuff an election in
        # (this automatically takes into account unused elections, based on next_elec_length definition above)
        elif not next_elec_i and gap <= (next_elec_length * 1.4):
            next_elec = None
            # If we have an existing unused election, we can use that (as next_elec_length is already based on the first unused elec, this can happen)
            if len(unused_elecs) > 0:
                next_elec = unused_elecs.pop(0)
            # If not, create a new election timed to the gap (next_elec_length will be the average song length*1.4, so this will happen frequently)
            else:
                next_elec = _create_election(sid, running_time, gap)
            num_elections += 1
            next_elec.start = running_time
            running_time += next_elec.length()
            next[sid].insert(i, next_elec)
        # If it's more accurate to squeeze a created election in here than adjust the next event, move the event
        # *OR* the next event is too far out and we have elections in hand
        elif next_elec_i and ((gap <= (next_elec_length / 2)) or
                              (gap > (next_elec_length * 1.5))):
            next_elec = next[sid].pop(next_elec_i)
            next_elec.start = running_time
            running_time += next_elec.length()
            next[sid].insert(i, next_elec)
        # The next event is better off aligned
        else:
            next[sid][i].start = running_time
            running_time += next[sid][i].length()
        i += 1

    needed_elecs = config.get_station(sid,
                                      "num_planned_elections") - num_elections
    log.debug(
        "create_elec",
        "Before: Needed elecs: %s // Unused elecs: %s // Current num elections: %s // Next size: %s"
        % (needed_elecs, len(unused_elecs), num_elections, len(next[sid])))
    # Step 5: If we're at less than X elections available, create them (or use unused ones) and append them
    # No timing is required here, since we're simply outright appending to the end
    # (any elections appearing before a scheduled item would be handled by the block above)
    failures = 0
    while needed_elecs > 0 and failures <= 2:
        next_elec = None
        if len(unused_elecs) > 0:
            next_elec = unused_elecs.pop(0)
        else:
            next_elec = _create_election(sid, running_time)
        next_elec_length = next_elec.length()
        if next_elec_length > 0:
            next_elec.start = running_time
            running_time += next_elec.length()
            next[sid].append(next_elec)
            num_elections += 1
            needed_elecs -= 1
        else:
            log.error("create_elec",
                      "Election ID %s was faulty - zero length.  Deleting.")
            next_elec.delete()
            failures += 1
    if failures >= 2:
        log.error("create_elec", "Total failure when creating elections.")
    log.debug(
        "create_elec",
        "After: Unused elecs: %s // Current num elections: %s // Next size: %s"
        % (len(unused_elecs), num_elections, len(next[sid])))
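To make the gap logic concrete: with an average song length of 240 seconds and no election already queued ahead, a new or unused election is inserted into any gap of up to 240 * 1.4 = 336 seconds; if an election is already queued further out, it is pulled forward when the gap is either at most half an election (120 seconds) or more than 1.5 elections (360 seconds); in every other case the next event is simply realigned to start at running_time.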
Example #26
def move(orig=None, dest=None):
    log.debug("[mv] %s -> %s "%(orig, dest))
    if os.path.isdir(orig) :
        shutil.move(orig, dest)
    else:
        log.error("La carpeta [%s] no exíste "%orig)
Example #27
def exec_test(config_data):

    add_test_info = AddTestInfo(1, "api/v2/cluster/<fsid>/cli")
    add_test_info.started_info()

    try:

        test = Test(**config_data)

        commands = [
            "ceph osd tree",
            ["ceph", "-s"],
            # ["ceph", "osd", "dump"] # this type of command fails, i.e list of 3 elements .
        ]

        """
        commands = [
            'ceph osd tree',
            ['ceph', '-s'],
            'ceph osd pool delete test_rbd test_rbd --yes-i-really-really-mean-it',
            # positive:
            'ceph osd pool create test_rbd 100',

            ## Create a test image that will be used in the api testing
            'rbd --cluster ceph  create test0 --image-format 2  --size 4096 --pool test_rbd --name client.test_rbd --keyring /etc/ceph/test_rbd.keyring',

            ## cp
            'rbd --cluster ceph  cp test_rbd/test0 test_rbd/test4',
            'rbd --cluster ceph  snap create --pool test_rbd --image test0 --snap snap101',
            'rbd --cluster ceph  cp test_rbd/test0@snap101 test_rbd/test5',
            'rbd --cluster ceph  cp test_rbd/test0@snap101 test_rbd/test6',

            ## remove image:
            'rbd --cluster ceph  create test100 --image-format 2  --size 4096 --pool test_rbd --name client.test_rbd --keyring /etc/ceph/test_rbd.keyring',
            'rbd --cluster ceph  remove -p test_rbd --image test100',
            'rbd --cluster ceph  create test100 --image-format 2  --size 4096 --pool test_rbd --name client.test_rbd --keyring /etc/ceph/test_rbd.keyring',
            'rbd --cluster ceph  remove test_rbd/test100',
            'rbd --cluster ceph  create test100 --image-format 2  --size 4096 --pool test_rbd --name client.test_rbd --keyring /etc/ceph/test_rbd.keyring',
            'rbd --cluster ceph  rm -p test_rbd --image test100',

            ## bench-write
            'rbd --cluster ceph  bench-write -p test_rbd --image test0 --io-size 1K --io-threads 3 --io-total 1M --io-pattern rand',
            'rbd --cluster ceph  bench-write -p test_rbd --image test0 --io-size 1K --io-threads 3 --io-total 1M --io-pattern seq',
            'rbd --cluster ceph  bench-write -p test_rbd --image test0 --io-size 1M --io-threads 3 --io-total 1G --io-pattern rand',
            'rbd --cluster ceph  bench-write -p test_rbd --image test0 --io-size 1G --io-threads 3 --io-total 2G --io-pattern rand',

            ## info
            'rbd --cluster ceph  info test_rbd/test0',

            ##resize,
            'rbd --cluster ceph  resize -p test_rbd --image test0 -s 4000M --allow-shrink',
            'rbd --cluster ceph  resize test_rbd/test0 -s 4096M',

            ## snap
            'rbd --cluster ceph  snap create --pool test_rbd --image test0 --snap snap1',
            'rbd --cluster ceph  snap remove --pool test_rbd --image test0 --snap snap1',
            'rbd --cluster ceph  snap create test_rbd/test0@snap1',
            'rbd --cluster ceph  snap list test_rbd/test0',
            'rbd --cluster ceph  snap rename test_rbd/test0@snap1 test_rbd/test0@snap1-1',
            'rbd --cluster ceph  snap rename test_rbd/test0@snap1-1 test_rbd/test0@snap1',
            'rbd --cluster ceph  snap rename --pool test_rbd --image test0 --snap snap1 --dest-pool test_rbd --dest test0 --dest-snap snap1-1',
            'rbd --cluster ceph  snap rename --pool test_rbd --image test0 --snap snap1-1 --dest-pool test_rbd --dest test0 --dest-snap snap1',
            'rbd --cluster ceph  snap protect test_rbd/test0@snap1',
            'rbd --cluster ceph  snap unprotect test_rbd/test0@snap1',
            'rbd --cluster ceph  snap protect -p test_rbd  --image test0 --snap snap1',
            'rbd --cluster ceph  snap unprotect -p test_rbd  --image test0 --snap snap1',
            'rbd --cluster ceph  snap create test_rbd/test0@snap100',
            'rbd --cluster ceph  snap rollback  test_rbd/test0@snap100',
            'rbd --cluster ceph  snap remove test_rbd/test0@snap1',
            'rbd --cluster ceph  snap purge test_rbd/test0',

            ## children, clone, flatten
            'rbd --cluster ceph  snap create --pool test_rbd --image test0 --snap snap121',
            'rbd --cluster ceph  snap protect test_rbd/test0@snap121',
            'rbd --cluster ceph  clone test_rbd/test0@snap121 test_rbd/test40',
            'rbd --cluster ceph  children test_rbd/test0@snap121',
            'rbd --cluster ceph  children -p test_rbd --image test0 --snap snap121 --format xml',
            'rbd --cluster ceph  children -p test_rbd --image test0 --snap snap121 --format json',
            'rbd --cluster ceph  children -p test_rbd --image test0 --snap snap121 --pretty-format',
            'rbd --cluster ceph  clone --pool test_rbd --image test0 --snap snap121 --dest-pool test_rbd --dest test41 --object-size 16K --stripe-unit 2 --stripe-count 2',
            'rbd --cluster ceph  clone test_rbd/test0@snap121 test_rbd/test50',
            'rbd --cluster ceph  flatten test_rbd/test40',
            'rbd --cluster ceph  flatten -p test_rbd --image test50',

            ## diff
            'rbd --cluster ceph  diff test_rbd/test0',
            'rbd --cluster ceph   diff -p test_rbd --image test0 --snap snap121 --whole-object --format xml',
            'rbd --cluster ceph   diff -p test_rbd --image test0 --snap snap121 --whole-object --format json',
            'rbd --cluster ceph   diff -p test_rbd --image test0 --snap snap121 --whole-object --format xml --pretty-format',

            ## export
            'rbd --cluster ceph  export test_rbd/test0 /tmp/RBD-test-image-export0',
            'rbd --cluster ceph  export -p test_rbd --image test0 --path /tmp/RBD-test-image-export1',
            'rbd --cluster ceph  export test_rbd/test0@snap1 /tmp/RBD-test-image-export2',

            ## feature disable enable
            'rbd --cluster ceph  create test-disenable --image-format 2  --size 4096 --pool test_rbd --name client.test_rbd --keyring /etc/ceph/test_rbd.keyring',
            'rbd --cluster ceph  feature disable test_rbd/test-disenable layering',
            'rbd --cluster ceph  remove -p test_rbd --image test-disenable',
            'rbd --cluster ceph  feature enable test_rbd/test0 journaling',
            'rbd --cluster ceph  feature disable test_rbd/test0 journaling',

            ##image-meta list, set, get
            'rbd --cluster ceph  image-meta set test_rbd/test0 rbd_cache_size false',
            'rbd --cluster ceph  image-meta list test_rbd/test0',
            'rbd --cluster ceph  image-meta list -p test_rbd --image test0 --format json',
            'rbd --cluster ceph  image-meta list -p test_rbd --image test0 --format xml',
            'rbd --cluster ceph  image-meta list -p test_rbd --image test0 --pretty-format',
            'rbd --cluster ceph  image-meta get test_rbd/test0 rbd_cache_size',

            ##list
            'rbd --cluster ceph  list',
            'rbd --cluster ceph  ls',

            ## rename
            'rbd --cluster ceph  rename test_rbd/test0 test_rbd/testRen',
            'rbd --cluster ceph  rename -p test_rbd --image testRen --dest-pool test_rbd --dest test0',

            # Status
            'rbd --cluster ceph  status test_rbd/test0',

            # Showmapped. Needs manual steps to test these. Tested manually
            # 'rbd --cluster ceph  showmapped --format json',
            # 'rbd --cluster ceph  showmapped --format xml',
            # 'rbd --cluster ceph  showmapped',
            # 'rbd --cluster ceph  showmapped --pretty-format',

            # unmap - can't be automated. involves running ext4 file system creation commands and mount cmds
            # unmap

            # watch: This doesn't return until enter is pressed. Already a defect exists. Can't be used in API
            # 'rbd --cluster ceph  watch test0'

            ## du
            # 'rbd --cluster ceph  du -p test_rbd',
            # 'rbd --cluster ceph  du -p test_rbd --image test0',
            # 'rbd --cluster ceph  du -p test_rbd --image test0 --pretty-format',
            # 'rbd --cluster ceph  du -p test_rbd --image test0 --snap snap1 --from-snap 3424',


            ## diff
            'rbd --cluster ceph  diff --pool test_rbd --image test0 --snap snap121 --from-snap snap121 --whole-object --format json',
            'rbd --cluster ceph  diff --pool test_rbd --image test0 --snap snap121 --from-snap snap121 --whole-object --pretty-format',
            'rbd --cluster ceph  diff --pool test_rbd --image test0 --snap snap121 --from-snap snap121 --pretty-format',
            'rbd --cluster ceph  diff --pool test_rbd --image test0 --snap snap121 --from-snap snap121',
            'rbd --cluster ceph  diff test_rbd/test0@snap121',
            'rbd --cluster ceph  diff --pool test_rbd --image test0 --snap snap121',

            # -----------------------------------------------------------------

            # Help commands:
            ' rbd	help	bench-write	',
            ' rbd	help	children	',
            ' rbd	help	clone	',
            ' rbd	help	copy	',
            ' rbd	help	create	',
            ' rbd	help	diff	',
            ' rbd	help	disk-usage	',
            ' rbd	help	export	',
            ' rbd	help	export-diff	',
            ' rbd	help	feature	disable	',
            ' rbd	help	feature	enable	',
            ' rbd	help	flatten	',
            ' rbd	help	image-meta	get	',
            ' rbd	help	image-meta	list	',
            ' rbd	help	image-meta	remove	',
            ' rbd	help	image-meta	set	',
            ' rbd	help	import	',
            ' rbd	help	import-diff	',
            ' rbd	help	info	',
            ' rbd	help	journal	export	',
            ' rbd	help	journal	import	',
            ' rbd	help	journal	info	',
            ' rbd	help	journal	inspect	',
            ' rbd	help	journal	reset	',
            ' rbd	help	journal	status	',
            ' rbd	help	list	',
            ' rbd	help	lock	add	',
            ' rbd	help	lock	list	',
            ' rbd	help	lock	remove	',
            ' rbd	help	map	',
            ' rbd	help	merge-diff	',
            ' rbd	help	mirror	image	demote	',
            ' rbd	help	mirror	image	disable	',
            ' rbd	help	mirror	image	enable	',
            ' rbd	help	mirror	image	promote	',
            ' rbd	help	mirror	image	resync	',
            ' rbd	help	mirror	image	status	',
            ' rbd	help	mirror	pool	disable	',
            ' rbd	help	mirror	pool	enable	',
            ' rbd	help	mirror	pool	info	',
            ' rbd	help	mirror	pool	peer	add	',
            ' rbd	help	mirror	pool	peer	remove	',
            ' rbd	help	mirror	pool	peer	set	',
            ' rbd	help	mirror	pool	status	',
            ' rbd	help	nbd	list	',
            ' rbd	help	nbd	map	',
            ' rbd	help	nbd	unmap	',
            ' rbd	help	object-map	rebuild	',
            ' rbd	help	remove	',
            ' rbd	help	rename	',
            ' rbd	help	resize	',
            ' rbd	help	showmapped	',
            ' rbd	help	snap	create	',
            ' rbd	help	snap	list	',
            ' rbd	help	snap	protect	',
            ' rbd	help	snap	purge	',
            ' rbd	help	snap	remove	',
            ' rbd	help	snap	rename	',
            ' rbd	help	snap	rollback	',
            ' rbd	help	snap	unprotect	',
            ' rbd	help	status	',
            ' rbd	help	unmap	',

            ########################################################################

            # create images

            ' rbd create test_rbd/test_image02 --size 4G',
            ' rbd create test_rbd/test_image01 --size 2G',
            ' rbd create test_rbd/test_image03 --size 6G',
            ' rbd create test_rbd/test_image04 --size 5G',
            ' rbd create test_rbd/test_image05 --size 3G',

            # list images in a pool

            'rbd list --pool test_rbd --format json --pretty-format json',
            'rbd list --long --pool test_rbd --format json --pretty-format json',
            'rbd list --long --pool test_rbd --format plain',
            'rbd list --long --pool test_rbd --format xml',
            'rbd list --long --pool test_rbd --format xml --pretty-format xml',

            # resize an image

            'rbd resize --pool test_rbd --image test_image03 --size 3G --no-progress --allow-shrink',
            'rbd resize --pool test_rbd --image test_image01 --size 1G --allow-shrink',
            'rbd resize --pool test_rbd --image test_image05 --size 5G',

            # add lock

            'rbd lock add --shared test_lock_tag test_rbd/test_image01 test_lock_id',
            'rbd lock add test_rbd/test_image02 test_lock_id',
            'rbd lock add --shared test_lock_tag test_rbd/test_image05 test_lock_id_03',
            'rbd lock add test_rbd/test_image04 test_lock_id_02',

            # list locks

            'rbd lock ls test_rbd/test_image01',
            'rbd lock ls test_rbd/test_image02 --format xml',
            'rbd lock ls --pool test_rbd --image test_image01 --format json --pretty-format json',

            # unlock lock (remove)

            'rbd lock rm test_rbd/test_image01 test_lock_id client.14325',
            'rbd lock rm --pool test_rbd --image test_image02 test_rbd/test_image02 test_lock_id client.14326',
            'rbd lock rm test_rbd/test_image04 test_lock_id_02 client.14329',

            # map

            # creating image and snaps to test map

            'rbd create test_rbd/test_image06 --image-feature layering --size 2G',
            'rbd snap create test_rbd/test_image06@snap060',

            'rbd map --pool test_rbd --image test_image06',
            'rbd map --pool test_rbd --image test_image06 --snap snap060',
            'rbd map --pool test_rbd --image test_image06 --snap snap060 -o fsid=aaaa_dddddddddddddddd_rrrrrrrrrrrrrrrrrrrrrrr_ share',
            'rbd map --pool test_rbd --image test_image06 --snap snap060 -o fsid=aaaa_dddddddddddddddd_rrrrrrrrrrrrrrrrrrrrrrr_ crc',

            # image-meta set

            'rbd image-meta set test_rbd/test_image06 test_key02 test_value02',

            # image-meta get

            'rbd image-meta get test_rbd/test_image06 test_key02',

            # image-meta list

            'rbd image-meta list --pool test_rbd --image test_image06 --format xml --pretty-format test_rbd/test_image06',
            'rbd image-meta list --pool test_rbd --image test_image06 --format json test_rbd/test_image06',

            # image-meta list
            'rbd image-meta remove --pool test_rbd --image test_image06 test_rbd/test_image06 test_key01'
            'rbd image-meta remove test_rbd/test_image06 test_key02',

            # image-meta remove - no error message for deleting non existing key

            'rbd image-meta remove --pool test_rbd --image test_image06 test_rbd/test_image06 test_key01',
            'rbd image-meta remove test_rbd/test_image06 test_key02'

        ]
        """

        data_to_post = [{"command": x} for x in commands]

        results = [
            test.post(test.cli_url, each_data, request_api=False)
            for each_data in data_to_post
        ]

        failed = [
            (command, result)
            for result, command in zip(results, commands)
            if result["status"] != 0 and result["err"] != ""
        ]

        passed = len(commands) - len(failed)

        log.info("no of commands submitted: %s" % len(commands))
        log.info("no of commands passed: %s" % passed)

        if failed:
            log.info("no of commands failed : %s" % len(failed))
            raise AssertionError(failed)

        add_test_info.success("test ok")

    except AssertionError as e:
        log.error(e)
        add_test_info.failed("test error")
Example #28
def _create_elections(sid):
	# Step, er, 0: Update the request cache first, so elections have the most recent data to work with
	# (the entire requests module depends on its caches)
	request.update_cache(sid)

	max_sched_id, max_elec_id, num_elections = _get_schedule_stats(sid)
	log.debug("create_elec", "Max sched ID: %s // Max elec ID: %s // Num elections already existing: %s // Size of next: %s" % (max_sched_id, max_elec_id, num_elections, len(next[sid])))

	# Step 2: Load up any elections that have been added while we've been idle and append them to the list
	unused_elec_id = db.c.fetch_list("SELECT elec_id FROM r4_elections WHERE sid = %s AND elec_id > %s AND elec_used = FALSE AND elec_priority = FALSE ORDER BY elec_id", (sid, max_elec_id))
	unused_elecs = []
	for elec_id in unused_elec_id:
		unused_elecs.append(event.Election.load_by_id(elec_id))

	# Step 3: Insert elections where there's time and adjust predicted start times as necessary, if num_elections < 2 then create them where necessary
	i = 1
	running_time = current[sid].start_actual + current[sid].length()
	if len(next[sid]) > 0:
		next[sid][0].start = running_time
	while i < len(next[sid]):
		next_start = next[sid][i].start
		gap = next_start - running_time
		next_elec_i = None
		next_elec_length = playlist.get_average_song_length(sid)
		j = i
		while j < len(next[sid]):
			if next[sid][j].is_election:
				next_elec_i = j
				next_elec_length = next[sid][j].length()
				break
			j += 1
		if not next_elec_i and len(unused_elecs) > 0:
			next_elec_length = unused_elecs[0].length()

		# TODO: This algorithm DEFINITELY needs code/concept review
		# There are potential holes - it is not as comprehensive a scheduler as the previous
		# Rainwave scheduler, however it is vastly simplified.
		# One drawback is that you cannot schedule elections themselves to run at certain times.

		create_elecs = False
		# If the event we're looking at collides with the previous event, adjust this event to start later
		if gap <= 0:
			next[sid][i].start = running_time
			running_time += next[sid][i].length()
		# If we have no elections current in the next list and there's enough time to fit a song, stuff an election in
		# (this automatically takes into account unused elections, based on next_elec_length definition above)
		elif not next_elec_i and gap <= (next_elec_length * 1.4):
			next_elec = None
			# If we have an existing unused election, we can use that (as next_elec_length is already based on the first unused elec, this can happen)
			if len(unused_elecs) > 0:
				next_elec = unused_elecs.pop(0)
			# If not, create a new election timed to the gap (next_elec_length will be the average song length*1.4, so this will happen frequently)
			else:
				next_elec = _create_election(sid, running_time, gap)
			num_elections += 1
			next_elec.start = running_time
			running_time += next_elec.length()
			next[sid].insert(i, next_elec)
		# If it's more accurate to squeeze a created election in here than adjust the next event, move the event
		# *OR* the next event is too far out and we have elections in hand
		elif next_elec_i and ((gap <= (next_elec_length / 2)) or (gap > (next_elec_length * 1.5))):
			next_elec = next[sid].pop(next_elec_i)
			next_elec.start = running_time
			running_time += next_elec.length()
			next[sid].insert(i, next_elec)
		# The next event is better off aligned
		else:
			next[sid][i].start = running_time
			running_time += next[sid][i].length()
		i += 1

	needed_elecs = config.get_station(sid, "num_planned_elections") - num_elections
	log.debug("create_elec", "Before: Needed elecs: %s // Unused elecs: %s // Current num elections: %s // Next size: %s" % (needed_elecs, len(unused_elecs), num_elections, len(next[sid])))
	# Step 5: If we're at less than X elections available, create them (or use unused ones) and append them
	# No timing is required here, since we're simply outright appending to the end
	# (any elections appearing before a scheduled item would be handled by the block above)
	failures = 0
	while needed_elecs > 0 and failures <= 2:
		next_elec = None
		if len(unused_elecs) > 0:
			next_elec = unused_elecs.pop(0)
		else:
			next_elec = _create_election(sid, running_time)
		next_elec_length = next_elec.length()
		if next_elec_length > 0:
			next_elec.start = running_time
			running_time += next_elec.length()
			next[sid].append(next_elec)
			num_elections += 1
			needed_elecs -= 1
		else:
			log.error("create_elec", "Election ID %s was faulty - zero length.  Deleting.")
			next_elec.delete()
			failures += 1
	if failures >= 2:
		log.error("create_elec", "Total failure when creating elections.")
	log.debug("create_elec", "After: Unused elecs: %s // Current num elections: %s // Next size: %s" % (len(unused_elecs), num_elections, len(next[sid])))
Example #29
def exec_test(config_data):

    add_test_info = AddTestInfo(17, '\napi/v2/user\n' 'api/v2/user/<pk>')
    add_test_info.started_info()

    try:
        test = Test(**config_data)

        # --------------- get users --------------

        response = test.get(test.user_url)

        clean_response(response)

        # --------------- create new users --------------

        new_user = UserCreationDefination()

        new_user.username = names.get_first_name().lower()
        new_user.email = '*****@*****.**'
        new_user.password = '******'

        log.info('new username: %s' % new_user.username)

        response = test.post(test.user_url, new_user.__dict__)

        new_user_created = clean_response(response)

        new_uid = new_user_created['id']

        logged_out = test.logout()

        assert logged_out, "logout failed"

        # ------------- edit the user details by logging back as the new user ---------------

        new_config_data = config_data.copy()

        new_config_data['username'] = new_user.username
        new_config_data['password'] = new_user.password

        test2 = Test(**new_config_data)

        edit = UserCreationDefination()

        edit.email = '*****@*****.**'

        response = test2.patch(test.user_url + "/" + str(new_uid),
                               edit.__dict__)

        clean_response(response)

        test2.logout()

        # --------------- delete the created user ---------------

        test3 = Test(**config_data)

        response = test3.delete(test3.user_url + "/" + str(new_uid))

        clean_response(response)

        response = test3.get(test2.user_url)

        clean_response(response)

        add_test_info.success('test ok')

    except Exception:
        log.error('\n%s' % traceback.format_exc())
        add_test_info.failed('test error')

    return add_test_info.completed_info(config_data['log_copy_location'])
Example #30
def exec_test(config_data):
    add_test_info = AddTestInfo(
        13, '\napi/v2/cluster/<fsid>/pool \n'
        'api/v2/cluster/<fsid>/pool/<pool_id>')
    add_test_info.started_info()

    try:

        pool_name = 'pool_' + "api_testing" + str(random.randint(1, 1000))

        pool_ops = Test(**config_data)

        pool_ops.get(pool_ops.pool_url)

        # ------------ creating pool --------------

        pool_definition = PoolDefination()

        pool_definition.name = pool_name
        pool_definition.size = 3
        pool_definition.pg_num = 64
        pool_definition.crush_ruleset = 0
        pool_definition.min_size = 2
        pool_definition.crash_replay_interval = 0
        pool_definition.pg_num = 64
        pool_definition.hashpspool = True
        pool_definition.quota_max_objects = 0
        pool_definition.quota_max_bytes = 0

        log.debug('pool definition complete')

        log.info('json data \n%s:' % pool_definition.__dict__)

        pool_ops.post(pool_ops.pool_url, pool_definition.__dict__)

        # ------------- editing pool ------------

        pools = pool_ops.get(pool_ops.pool_url)

        my_pool = None

        for pool in pools:
            if pool_definition.name == pool['name']:
                log.debug('got matching pool')
                my_pool = pool
                log.debug(my_pool)
                break

        # asserts if my_pool is none,
        assert my_pool is not None, ("did not find any pool with the name %s" %
                                     pool_definition.name)

        pool_editing = PoolDefination()

        pool_editing.name = pool_name + "_renamed"

        pool_ops.patch(pool_ops.pool_url + "/" + str(my_pool['id']),
                       pool_editing.__dict__)

        # ---------------- deleting pool ---------------

        pool_ops.delete(pool_ops.pool_url + "/" + str(my_pool['id']))

        add_test_info.success('test ok')

    except AssertionError as e:
        log.error(e)
        add_test_info.failed('test error')
Example #31
def makeDir(dirName = None):
    if not os.path.exists(dirName):
        os.makedirs(dirName)
        log.info("mkdir %s"%dirName)
    else:
        log.error("La carpeta [%s] ya exíste"%dirName)
Example #32
def organizarVolumenes(manga = Manga):
    lstFolder = MangaFile.listarArchivosCarpeta(manga)       
    if(len(lstFolder) > 0):
        totPre = len(lstFolder[0]) - 1
        lstVol, status = VolumenScan.listaVolumenes(manga)        
        for volumen in lstVol:
            lstFolderInVol = []
            capIni = volumen.capitulos[-1].name.split(" ")[-1]
            capFin = volumen.capitulos[0].name.split(" ")[-1]
            capIni = funciones.eliminarChrToEnd(capIni, ".")
            capFin = funciones.eliminarChrToEnd(capFin, ".")
            capIni = "C%s"%funciones.prefijo(str(capIni), totPre)
            capFin = "C%s"%funciones.prefijo(str(capFin), totPre)
            log.info( "%s ):: %s -> %s"%(volumen.name, capIni, capFin))
            for folder in lstFolder:
                downloadDir =  MangaFile.getMangaDownloadFolder(manga.uCode, folder)
                if capIni <= folder and folder <= capFin:
                    lstFolderInVol.append(downloadDir)
            if len(lstFolderInVol) > 0:
                volumenName = volumen.name.split(" ")[-1]
                volumenName = "%s-%s-%s-%s"%(funciones.prefijo(str(volumenName), 2), str(manga.uCode).title(), capIni, capFin)
                volumensDir = "%s%s/volumenes/%s"%(config.CONST_PATH, manga.uCode, volumenName)
                volumensDir = volumensDir.replace(' ', '')
                log.debug("[mkdir] =>%s"%volumensDir)
                MangaFile.makeDir(volumensDir)
                for folder in lstFolderInVol:
                    folderName = folder.split("/")[-1]
                    destFolder = "%s/%s"%(volumensDir, folderName)
                    MangaFile.move(folder, destFolder)         
    else:
        log.error("No se han encontrado capítulos en la carpeta download")               
    volumensDir = "%s%s/volumenes/"  %(config.CONST_PATH, manga.uCode)
    coverDir = "%s%s/covers/"  %(config.CONST_PATH, manga.uCode)
    lstVolumen = MangaFile.listaArchivosPath(volumensDir)
    lstCovers = MangaFile.listaArchivosPath(coverDir)
    log.info("Poniendo las carátulas en los volúmenes")
    if(len(lstVolumen) > 0) and (len(lstCovers) > 0):
        for volumen in lstVolumen:
            volFolder = "%s%s"%(volumensDir, volumen)
            log.debug(volFolder)
            numVol = volumen.split("-")[0]
            frontFile = "%s_v%s_front.jpg"%(manga.id, numVol)
            fullFile = "%s_v%s_full.jpg"%(manga.id, numVol)
            tocFile = "%s_v%s_toc.jpg"%(manga.id, numVol)
            backFile = "%s_v%s_back.jpg"%(manga.id, numVol)
            if (frontFile in lstCovers):
                origen = "%s%s"%(coverDir,frontFile)
                destino = "%s/001_front.jpg"%(volFolder)                
                MangaFile.copy(origen, destino)
            if (fullFile in lstCovers):
                origen = "%s%s"%(coverDir,fullFile)
                destino = "%s/002_full.jpg"%(volFolder)                
                MangaFile.copy(origen, destino)
            if (tocFile in lstCovers):
                origen = "%s%s"%(coverDir,tocFile)
                destino = "%s/003_toc.jpg"%(volFolder)                
                MangaFile.copy(origen, destino)
            if (backFile in lstCovers):
                origen = "%s%s"%(coverDir,backFile)
                destino = "%s/z004_back.jpg"%(volFolder)                
                MangaFile.copy(origen, destino)