Example no. 1
# Imports assumed from the dfa-lib-python package layout.
from dfa_lib_python.dataset import DataSet
from dfa_lib_python.element import Element


def test_set_elements_pass():
    tag = "dstag"
    elements = [Element([1, 2, 3, 4])]
    new_elements = [Element([5, 6, 7, 8])]
    expected_result = [new_elements[0].values]
    dataset = DataSet(tag, elements)
    dataset.elements = new_elements
    assert dataset.elements == expected_result
Example no. 2
    def aggreg_geracao(self, table_name, aggreg_unit):
        """Aggregate the generation values for the given aggregation unit.

        :param string table_name: Name of the table.
        :param string aggreg_unit: One of [diario, mensal].
        :return: Aggregated dataframe.
        :rtype: pyspark.sql.DataFrame

        """
        st_time = time.time()

        t1 = Task(3, self.dataflow_tag, "aggreg_geracao", "2")
        t1_input = DataSet("i{}1".format('aggreg_geracao'), [Element([table_name,datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"), aggreg_unit])])
        t1.add_dataset(t1_input)
        t1.begin()
        sql = ''
        if aggreg_unit == 'diario':
            sql = """
                SELECT r.subsistema, t.data, sum(t.valor) as valor
                FROM {} as t,
                    recursos as r
                WHERE t.guid_usina = r.guid_recurso
                GROUP BY r.subsistema, t.data
            """.format(table_name)
        elif aggreg_unit == 'mensal':
            sql = """
                SELECT r.subsistema, avg(t.valor) as valor
                FROM {} as t,
                    recursos as r
                WHERE t.guid_usina = r.guid_recurso
                GROUP BY r.subsistema
            """.format(table_name)
        df = self.spark.sql(sql)
        c = df.count()
        runtime = time.time() - st_time
        stats = {
            'task': 'aggreg_geracao',
            'currenttime': datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
            'elapsedtime': runtime,
            'attributes':{
                'aggregationunit': aggreg_unit,
                'tablename': table_name,
                'count': c
            }
        }
        # TODO: Publish the execution of the generation aggregation with the stats variable
        t1_output = DataSet("o{}1".format('aggreg_geracao'), [Element([stats['currenttime'],stats['elapsedtime'], stats['attributes']['count']])])
        t1.add_dataset(t1_output)
        t1.end()
        self.logger.info(stats)
        return df
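
A minimal usage sketch for this method, assuming a hypothetical host class Processador constructed with the SparkSession, logger, and dataflow_tag the method relies on:

# Hypothetical driver code; Processador and the table name 'geracao' are assumptions.
processador = Processador(spark, dataflow_tag='energia')
df_diario = processador.aggreg_geracao('geracao', 'diario')  # sum of valor per subsistema and day
df_mensal = processador.aggreg_geracao('geracao', 'mensal')  # average of valor per subsistema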
Example no. 3
def end_task(task, label, out_elements):
    """
    :param task: The Task returned by start_task
    :param label: Same as the corresponding Transformation label
    :param out_elements: List of output values (one Element is created per value)
    :return: The output DataSet
    """
    t_output = DataSet(f"o{label}", [Element([i]) for i in out_elements])
    task.add_dataset(t_output)
    task.end()

    return t_output
Example no. 4
def start_task(task_id, dataflow_tag, label, in_elements):
    """
    :param task_id: Integer id for each task (0, 1 ... n). Usually used with enumerate
    :param dataflow_tag: Tag of the enclosing dataflow
    :param label: Same as the corresponding Transformation label
    :param in_elements: List of input values (one Element is created per value)
    :return: t, t_input
    """
    in_elements = [Element([i]) for i in in_elements]
    t = Task(task_id, dataflow_tag, label)
    t_input = DataSet(f"i{label}", in_elements)
    t.add_dataset(t_input)
    t.begin()

    return t, t_input
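
Paired with end_task from Example no. 3, this helper brackets a monitored computation between Task begin and end; a minimal sketch in which 'my_flow', 'Filter', and run_filter are assumed names:

t, t_input = start_task(0, 'my_flow', 'Filter', ['input.csv'])
results = run_filter('input.csv')  # run_filter is a hypothetical computation
end_task(t, 'Filter', results)     # records one output Element per result value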
Example no. 5
            Attribute("elapsedtime", AttributeType.NUMERIC),
            Attribute("elapsedtimeloadgeracao", AttributeType.NUMERIC),
            Attribute("elapsedtimeloadintercambio", AttributeType.NUMERIC),
            Attribute("elapsedtimeloadcarga", AttributeType.NUMERIC),
            Attribute("elapsedtimecalccarga", AttributeType.NUMERIC),
            Attribute("elapsedtimecalcstats", AttributeType.NUMERIC),
            Attribute("subsistemamaisdemandante", AttributeType.TEXT),
            Attribute("valormaisalto", AttributeType.NUMERIC)
    ])

    tf7.set_sets([tf7_input, tf7_output])
    df.add_transformation(tf7)
    df.save()

    t1 = Task(1, dataflow_tag, "load_data", "1")
    t1_input = DataSet("i{}1".format('load_data'), [Element([';'.join(stats['attributes']['datafiles']),';'.join(stats['attributes']['tables']), stats['currenttime'], stats['attributes']['aggregationunit'], stats['attributes']['csvseparator']])])
    t1.add_dataset(t1_input)
    t1.begin()

    st_time = time.time()
    processador.load_data(lista_datafiles, lista_tabelas, sep)
    runtime = time.time() - st_time

    stats = {
        'task': 'load_data',
        'currenttime': datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        'elapsedtime': runtime
    }

    # TODO: Publish the start of the flow with the stats variable
    #######################
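
One way to discharge the TODO above, mirroring the output-dataset pattern of Examples no. 2 and no. 7 (a sketch, not a convention confirmed by this project):

    t1_output = DataSet("o{}1".format('load_data'),
                        [Element([stats['currenttime'], stats['elapsedtime']])])
    t1.add_dataset(t1_output)
    t1.end()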
Example no. 6
  [Attribute("NUMERO_SEQUENCIAS", AttributeType.NUMERIC)])
tf2.set_sets([tf2_input, tf2_output])
df.add_transformation(tf2)

# Transformation to create a mafft alignment: CriarAlinhamento
tf3 = Transformation("CriarAlinhamento")
tf3_input = Set("iCriarAlinhamento", SetType.INPUT,
  [Attribute("NUMERO_SEQUENCIAS", AttributeType.NUMERIC)])
tf3_output = Set("oCriarAlinhamento", SetType.OUTPUT,
  [Attribute("ALINHAMENTO", AttributeType.TEXT)])
tf3.set_sets([tf3_input, tf3_output])
df.add_transformation(tf3)
df.save()

t1 = Task(1, dataflow_tag, "ExtrairNome")
t1_input = DataSet("iExtrairNome", [Element(["dirin"])])
t1.add_dataset(t1_input)
t1.begin()

dirin = "/home/linux/"
# Abrindo o diretorio....
sequence_count = '0';  #contar as sequencias pro ajuste do MAFFT
# For file in os.listdir (dirin_do_ficheiro):
for file in os.listdir (dirin):
  if re.search ('.fasta$', file) is not None:
    #--- Mount directory and separate filename
    diretorio = file.split("/")
    nome = diretorio[diretorio.count('/')-1].split(".")  
    #--- Mount name of fasta and mafft files
    fasta = os.path.join (dirin, file)
    mafft = os.path.join (dirin, nome[0]+ ".mafft")
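
The excerpt never updates sequence_count, even though its comment says it should count the sequences for the MAFFT adjustment. In FASTA files every record starts with a '>' header line, so a plausible way to fill that gap (an assumption, not shown in the original) is:

# Hypothetical sketch: count FASTA records by their '>' header lines.
with open(fasta) as handle:
  sequence_count = sum(1 for line in handle if line.startswith('>'))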
Example no. 7
    def calculate_carga(self, df_geracao, df_intercambio, aggreg_unit):
        """Compute the global load (generation - interchange) by joining the given dataframes.

        :param pyspark.sql.DataFrame df_geracao: Generation data.
        :param pyspark.sql.DataFrame df_intercambio: Interchange data.
        :param string aggreg_unit: One of [diario, mensal].
        :return: Load data.
        :rtype: pyspark.sql.DataFrame

        """
        st_time = time.time()
        st_time_total = time.time()

        t1 = Task(3, self.dataflow_tag, "calculate_carga", "4")
        t1_input = DataSet("i{}1".format('calculate_carga'), [Element([datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"), aggreg_unit])])
        t1.add_dataset(t1_input)
        t1.begin()
        helpers.register_spark_table('geracao_aggr', df_geracao, self.spark, False)
        runtime_load_geracao = time.time() - st_time

        st_time = time.time()
        helpers.register_spark_table('intercambio_aggr', df_intercambio, self.spark, False)
        runtime_load_intercambio = time.time() - st_time

        sql = ""
        if aggreg_unit == 'diario':
            sql = """
                SELECT g.subsistema, g.data, (g.valor - i.valor) as valor
                FROM geracao_aggr as g,
                     intercambio_aggr as i
                WHERE g.subsistema = i.subsistema
                  AND g.data = i.data
            """
        elif aggreg_unit == 'mensal':
            sql = """
                SELECT g.subsistema, (g.valor - i.valor) as valor
                FROM geracao_aggr as g,
                     intercambio_aggr as i
                WHERE g.subsistema = i.subsistema
            """
        st_time = time.time()
        df_carga = self.spark.sql(sql)
        runtime_calc = time.time() - st_time
        st_time = time.time()
        helpers.register_spark_table('carga', df_carga, self.spark, False)
        runtime_load_carga = time.time() - st_time

        df_carga.show()  # show() prints the frame and returns None, so it is not passed to the logger

        # sql = """
        #     SELECT subsistema, valor
        #     FROM
        #         (SELECT subsistema,
        #                valor,
        #                RANK() OVER (PARTITION BY valor ORDER BY valor DESC) as rnk
        #         FROM
        #             carga) as a
        #     WHERE rnk = 1
        #
        # """

        sql = """
            SELECT subsistema, valor
            FROM carga
            ORDER BY valor DESC
        """

        st_time = time.time()
        df_stats = self.spark.sql(sql)
        df_stats.show()  # show() prints the frame and returns None, so it is not passed to the logger
        first_row = df_stats.first()  # single collection instead of two separate collect() calls
        subsistema_mais_demandante = first_row.subsistema
        valor_mais_alto = first_row.valor
        runtime_stats = time.time() - st_time

        runtime_total = time.time() - st_time_total
        stats = {
            'task': 'calculate_carga',
            'currenttime': datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
            'elapsedtime': runtime_total,
            'attributes':{
                'aggregation_unit': aggreg_unit,
                'elapsed_time_load_geracao': runtime_load_geracao,
                'elapsed_time_load_intercambio': runtime_load_intercambio,
                'elapsed_time_load_carga': runtime_load_carga,
                'elapsed_time_calc_carga': runtime_calc,
                'elapsed_time_stats': runtime_stats,
                'subsistema_mais_demandante': subsistema_mais_demandante,
                'valor_mais_alto': valor_mais_alto
            }
        }
        # Attribute("currenttime", AttributeType.TEXT),
        # Attribute("elapsedtime", AttributeType.NUMERIC),
        # Attribute("elapsedtimeloadgeracao", AttributeType.NUMERIC),
        # Attribute("elapsedtimeloadintercambio", AttributeType.NUMERIC),
        # Attribute("elapsedtimeloadcarga", AttributeType.NUMERIC),
        # Attribute("elapsedtimecalccarga", AttributeType.NUMERIC),
        # Attribute("elapsedtimecalcstats", AttributeType.NUMERIC),
        # Attribute("subsistemamaisdemandante", AttributeType.TEXT),
        # Attribute("valormaisalto", AttributeType.NUMERIC)
        t1_output = DataSet("o{}1".format('calculate_carga'),
            [
                Element([
                    stats['currenttime'],
                    stats['elapsedtime'],
                    stats['attributes']['elapsed_time_load_geracao'],
                    stats['attributes']['elapsed_time_load_intercambio'],
                    stats['attributes']['elapsed_time_load_carga'],
                    stats['attributes']['elapsed_time_calc_carga'],
                    stats['attributes']['elapsed_time_stats'],
                    stats['attributes']['subsistema_mais_demandante'],
                    stats['attributes']['valor_mais_alto']
                    ])])
        t1.add_dataset(t1_output)
        t1.end()
        # TODO: Publish the load calculation with the df_stats variable
        self.logger.info(stats)
        return df_carga
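
The begin/end bookkeeping here repeats the pattern of Example no. 2 and could be distilled into a small helper; a sketch assuming only the three-argument Task constructor seen in Example no. 4:

def with_provenance(task_id, dataflow_tag, label, input_values, fn, *args):
    # Hedged sketch: wrap any computation in a Task begin/end pair.
    t = Task(task_id, dataflow_tag, label)
    t.add_dataset(DataSet("i{}1".format(label), [Element(input_values)]))
    t.begin()
    result = fn(*args)
    t.add_dataset(DataSet("o{}1".format(label), [Element([repr(result)])]))
    t.end()
    return result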
Example no. 8
# Imports assumed from the dfa-lib-python package layout.
from dfa_lib_python.dataset import DataSet
from dfa_lib_python.element import Element


def test_get_elements_pass():
    tag = "dstag"
    elements = [Element([1, 2, 3, 4])]
    expected_result = [elements[0].values]
    dataset = DataSet(tag, elements)
    assert dataset.elements == expected_result