Example no. 1
 def setUp(self):
     """Build the Glacier sensor under test with representative arguments."""
     # task_id fixed from the copy-pasted 'test_athena_sensor': this
     # fixture exercises the Glacier job-operation sensor, not Athena.
     self.op = GlacierJobOperationSensor(
         task_id='test_glacier_sensor',
         aws_conn_id='aws_default',
         vault_name="airflow",
         job_id="1a2b3c4d",
         poke_interval=60 * 20,  # poll every 20 minutes
     )
Example no. 2
class TestAmazonGlacierSensor(unittest.TestCase):
    """Unit tests for GlacierJobOperationSensor.poke() per job status."""

    def setUp(self):
        # task_id fixed from the copy-pasted 'test_athena_sensor': this
        # suite exercises the Glacier job-operation sensor, not Athena.
        self.op = GlacierJobOperationSensor(
            task_id='test_glacier_sensor',
            aws_conn_id='aws_default',
            vault_name="airflow",
            job_id="1a2b3c4d",
            poke_interval=60 * 20,  # poll every 20 minutes
        )

    @mock.patch(
        "airflow.providers.amazon.aws.sensors.glacier.GlacierHook.describe_job",
        side_effect=[{"Action": "", "StatusCode": JobStatus.SUCCEEDED.value}],
    )
    def test_poke_succeeded(self, _):
        """A SUCCEEDED job status makes poke() return True."""
        self.assertTrue(self.op.poke(None))

    @mock.patch(
        "airflow.providers.amazon.aws.sensors.glacier.GlacierHook.describe_job",
        side_effect=[{"Action": "", "StatusCode": JobStatus.IN_PROGRESS.value}],
    )
    def test_poke_in_progress(self, _):
        """An IN_PROGRESS job status makes poke() return False (keep waiting)."""
        self.assertFalse(self.op.poke(None))

    @mock.patch(
        "airflow.providers.amazon.aws.sensors.glacier.GlacierHook.describe_job",
        side_effect=[{"Action": "", "StatusCode": ""}],
    )
    def test_poke_fail(self, _):
        """An unrecognized status code makes poke() raise AirflowException."""
        with self.assertRaises(AirflowException) as context:
            self.op.poke(None)
        self.assertIn('Sensor failed', str(context.exception))
Example no. 3
    schedule_interval=None,
    start_date=days_ago(1),  # Override to match your needs
) as dag:
    # [START howto_glacier_create_job_operator]
    # Kick off a Glacier retrieval job against the configured vault.
    create_glacier_job = GlacierCreateJobOperator(
        task_id="create_glacier_job",
        aws_conn_id="aws_default",
        vault_name=VAULT_NAME,
    )
    # Jinja-templated XCom pull: resolved at task runtime to the "jobId"
    # field of the value the create task pushed (presumably the Glacier
    # initiate-job response — confirm against the operator's return value).
    JOB_ID = '{{ task_instance.xcom_pull("create_glacier_job")["jobId"] }}'
    # [END howto_glacier_create_job_operator]

    # [START howto_glacier_job_operation_sensor]
    # Block downstream tasks until the Glacier job above reports completion.
    wait_for_operation_complete = GlacierJobOperationSensor(
        aws_conn_id="aws_default",
        vault_name=VAULT_NAME,
        job_id=JOB_ID,
        task_id="wait_for_operation_complete",
    )
    # [END howto_glacier_job_operation_sensor]

    # [START howto_glacier_transfer_data_to_gcs]
    transfer_archive_to_gcs = GlacierToGCSOperator(
        task_id="transfer_archive_to_gcs",
        aws_conn_id="aws_default",
        gcp_conn_id="google_cloud_default",
        vault_name=VAULT_NAME,
        bucket_name=BUCKET_NAME,
        object_name=OBJECT_NAME,
        gzip=False,
        # Override to match your needs
        # If chunk size is bigger than actual file size