Example #1
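# Dump a database table into an HDFS working directory through Sqoop2: create a
# named connection, submit a partitioned free-form query import (Sqoop replaces
# the ${CONDITIONS} placeholder with its split conditions), wait for the job to
# finish, then delete the job and the connection.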
def main():
    hr = HadoopRuntime()
    settings = hr.settings
    print(settings)
    hr.clean_working_dir()
    output_dir = hr.get_hdfs_working_dir("dump_dir")

    sqoop = MySqoop(settings.Param.Sqoop2Server_Host,
                    int(settings.Param.Sqoop2Server_Port))

    # First, create a connection
    conn_name = "import_m_job%s_blk%s" % (settings.GlobalParam["jobId"],
                                          settings.GlobalParam["blockId"])
    conn_ret = sqoop.create_connection(
        conn_name=conn_name,
        conn_str=settings.Param.connection_string,
        username=settings.Param.connection_username,
        password=settings.Param.connection_password)

    # Then, run the Sqoop import job
    fw_ps = {
        "output.storageType": "HDFS",
        "output.outputFormat": "TEXT_FILE",
        "output.outputDirectory": output_dir
    }
    if settings.Param.where_clause and str(settings.Param.where_clause).strip():
        table_sql = "select %s from %s where ${CONDITIONS} and %s " % (
            settings.Param.input_columns, settings.Param.table_name,
            settings.Param.where_clause)
    else:
        table_sql = "select %s from %s where ${CONDITIONS}" % (
            settings.Param.input_columns, settings.Param.table_name)
    partition_column = settings.Param.partition_column

    print(settings.Param.where_clause)
    print(table_sql)

    job_ps = {
        "table.sql": table_sql,
        "table.partitionColumn": partition_column
    }
    job_name = "import job :: username(%s) job %s, block %s" % (
        settings.GlobalParam["userName"], settings.GlobalParam["jobId"],
        settings.GlobalParam["blockId"])
    r = sqoop.create_import_job(job_name=job_name,
                                connection_id=conn_ret["id"],
                                framework_params=fw_ps,
                                job_params=job_ps)
    pp(r)
    sqoop.run_job(r['id'])
    sqoop.wait_job(r['id'])
    sqoop.delete_job(r['id'])

    # Finally, delete the connection we created
    sqoop.delete_connection_by_id(conn_ret["id"])

    settings.Output.output_dir.val = output_dir

    print("Done")
Example #2
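# Run Mahout's item-based RecommenderJob via "hadoop jar": inputs come from the
# ratings/usersFile datasource URLs, S3 credentials are injected as -D
# filesystem properties, recommendations go to an S3 output path, and an HDFS
# working directory holds temporary data.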
def main():
    hr = HadoopRuntime("spec.json")
    settings = hr.settings
    print(settings)

    # allocate output_path, and clean it
    output_path = get_s3_working_dir(settings, "output_path")
    s3_delete(output_path, settings)

    # Prepare working directory
    hr.hdfs_clean_working_dir()
    temp_path = hr.get_hdfs_working_dir("temp")

    # build parameters for hadoop job
    jar_file = "mahout-core-1.0-SNAPSHOT-job.jar"
    hadoop_params = {}
    hadoop_params["HADOOP_MAPRED_HOME"] = "/usr/lib/hadoop-mapreduce"
    hadoop_params_str = " ".join(
        ["%s=%s" % (k, v) for k, v in hadoop_params.items()])

    jar_defs = {}
    jar_defs["fs.s3n.awsAccessKeyId"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_ID
    jar_defs["fs.s3n.awsSecretAccessKey"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_SECRET
    jar_defs["fs.s3.awsAccessKeyId"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_ID
    jar_defs["fs.s3.awsSecretAccessKey"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_SECRET
    jar_defs["mapreduce.framework.name"] = "yarn"
    jar_defs["yarn.resourcemanager.address"] = settings.Param.yarn_resourcemanager
    jar_defs["yarn.resourcemanager.scheduler.address"] = settings.Param.yarn_resourcemanager_scheduler
    jar_defs["fs.defaultFS"] = settings.Param.hdfs_root
    jar_defs["mapreduce.output.fileoutputformat.compress"] = "false"
    jar_defs_str = " ".join(["-D %s=%s" % (k, v) for k, v in jar_defs.items()])

    other_args = OrderedDict()
    other_args["similarityClassname"] = "SIMILARITY_EUCLIDEAN_DISTANCE"
    other_args["input"] = settings.Input.ratings.as_datasource['URL']
    other_args["usersFile"] = settings.Input.usersFile.as_datasource['URL']
    other_args["output"] = output_path
    other_args["tempDir"] = temp_path
    other_args_str = " ".join(
        ["--%s %s" % (k, v) for k, v in other_args.items()])

    cmd_str = '%s hadoop jar %s org.apache.mahout.cf.taste.hadoop.item.RecommenderJob %s %s' % \
            (hadoop_params_str, jar_file, jar_defs_str, other_args_str)
    print("Executing:")
    print(cmd_str)
    ret = cmd(cmd_str)
    if ret != 0:
        print("Job failed")
        sys.exit(ret)

    settings.Output.output_path.val = output_path
    print("Done")
Example #3
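# Run the same Mahout RecommenderJob against HDFS input and output paths; the
# job is skipped when the input transaction matrix is empty.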
def main():
    hr = HadoopRuntime("spec.json")
    settings = hr.settings
    print(settings)

    # Prepare working directory
    hr.hdfs_clean_working_dir()
    # allocate temp_path
    temp_path = hr.get_hdfs_working_dir("temp")
    # allocate output_path
    output_path = hr.get_hdfs_working_dir("output_path")
    
    # build parameters for hadoop job
    jar_file = "./mahout-core-1.0-SNAPSHOT-job.jar"
    hadoop_params = {}
    hadoop_params["HADOOP_MAPRED_HOME"] = "/usr/lib/hadoop-mapreduce"
    hadoop_params_str = " ".join(["%s=%s" % (k,v) for k,v in hadoop_params.items()])

    jar_defs = {}
    jar_defs["mapreduce.framework.name"] = "yarn"
    jar_defs["yarn.resourcemanager.address"] = settings.Param.yarn_resourcemanager
    jar_defs["yarn.resourcemanager.scheduler.address"] = settings.Param.yarn_resourcemanager_scheduler
    jar_defs["fs.defaultFS"] = settings.Param.hdfs_root
    jar_defs["mapreduce.output.fileoutputformat.compress"] = "false"
    jar_defs_str = " ".join(["-D %s=%s" % (k,v) for k,v in jar_defs.items()])

    other_args = OrderedDict()
    other_args["similarityClassname"] = "SIMILARITY_EUCLIDEAN_DISTANCE"
    other_args["input"] = settings.Input.ratings.val
    other_args["usersFile"] = settings.Input.usersFile.val
    other_args["output"] = output_path
    other_args["tempDir"] = temp_path
    other_args_str = " ".join(["--%s %s" % (k,v) for k,v in other_args.items()])
    
    line_num = get_the_line_of_transaction(settings.Input.ratings.val)

    if line_num > 0:
        cmd_str = '%s hadoop jar %s org.apache.mahout.cf.taste.hadoop.item.RecommenderJob %s %s' % \
                (hadoop_params_str, jar_file, jar_defs_str, other_args_str)
        print("Executing:")
        print(cmd_str)
        ret = cmd(cmd_str)
        if ret != 0:
            print("Job failed")
            sys.exit(ret)
    else:
        print "Collaborative Input Transaction Matrix is empty. Skip the calcuating."   
    settings.Output.cl_result.val = output_path

    print("Done")
Example #4
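# Import the UserId, Description and RefreshDate columns of the Message table
# into HDFS through Sqoop2, partitioned on UserId.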
def main():
    hr = HadoopRuntime()
    settings = hr.settings
    print(settings)
    hr.clean_working_dir()
    output_dir = hr.get_hdfs_working_dir("message_dir")

    sqoop = MySqoop(settings.Param.Sqoop2Server_Host,
                    int(settings.Param.Sqoop2Server_Port))

    # First, create a connection
    conn_name = "import_m_job%s_blk%s" % (settings.GlobalParam["jobId"],
                                          settings.GlobalParam["blockId"])
    conn_ret = sqoop.create_connection(
        conn_name=conn_name,
        conn_str=settings.Param.connection_string,
        username=settings.Param.connection_username,
        password=settings.Param.connection_password)

    # Then, run the Sqoop import job
    fw_ps = {
        "output.storageType": "HDFS",
        "output.outputFormat": "TEXT_FILE",
        "output.outputDirectory": output_dir
    }
    job_ps = {
        "table.sql": "select UserId,Description,RefreshDate from Message where ${CONDITIONS}",
        "table.partitionColumn": "UserId"
    }
    job_name = "import job :: username(%s) job %s, block %s" % (
        settings.GlobalParam["userName"], settings.GlobalParam["jobId"],
        settings.GlobalParam["blockId"])
    r = sqoop.create_import_job(job_name=job_name,
                                connection_id=conn_ret["id"],
                                framework_params=fw_ps,
                                job_params=job_ps)
    pp(r)
    sqoop.run_job(r['id'])
    sqoop.wait_job(r['id'])
    sqoop.delete_job(r['id'])

    # Finally, delete the connection we created
    sqoop.delete_connection_by_id(conn_ret["id"])

    settings.Output.message_dir.val = output_dir

    print("Done")
Example #5
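# Run the HelloAvro MapReduce jar over an AWS_S3 datasource, passing the S3
# credentials both as environment variables and as -D filesystem properties,
# and publish the HDFS output directory as sentiment_result.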
def main():
    hr = HadoopRuntime("spec.json")
    settings = hr.settings
    print(settings)

    ds = json.load(open(settings.Input.DS))

    if ds['Type'] != "AWS_S3":
        raise ValueError("Invalid data_source type: '%s'" % ds['Type'])

    # Prepare working directory
    hr.hdfs_clean_working_dir()
    output_dir = hr.get_hdfs_working_dir("sentiment_result")
    settings.Output.sentiment_result.val = output_dir

    AWS_ACCESS_KEY_ID = ds['Meta']['key']
    AWS_SECRET_ACCESS_KEY = ds['Meta']['token']

    # Execute "hadoop jar"
    jar_file = "HelloAvro-1.1-jar-with-dependencies.jar"
    hadoop_params = {}
    hadoop_params["HADOOP_MAPRED_HOME"] = "/usr/lib/hadoop-mapreduce"
    hadoop_params["AWS_ACCESS_KEY_ID"] = ds['Meta']['key']
    hadoop_params["AWS_SECRET_ACCESS_KEY"] = ds['Meta']['token']
    hadoop_params_str = " ".join(
        ["%s=%s" % (k, v) for k, v in hadoop_params.items()])

    jar_defs = {}
    jar_defs["fs.s3n.awsAccessKeyId"] = '"%s"' % AWS_ACCESS_KEY_ID
    jar_defs["fs.s3n.awsSecretAccessKey"] = '"%s"' % AWS_SECRET_ACCESS_KEY
    jar_defs["fs.s3.awsAccessKeyId"] = '"%s"' % AWS_ACCESS_KEY_ID
    jar_defs["fs.s3.awsSecretAccessKey"] = '"%s"' % AWS_SECRET_ACCESS_KEY
    jar_defs["mapreduce.framework.name"] = "yarn"
    jar_defs["yarn.resourcemanager.address"] = settings.Param.yarn_resourcemanager
    jar_defs["yarn.resourcemanager.scheduler.address"] = settings.Param.yarn_resourcemanager_scheduler
    jar_defs["fs.defaultFS"] = settings.Param.hdfs_root
    jar_defs_str = " ".join(["-D %s=%s" % (k, v) for k, v in jar_defs.items()])

    cmd_str = '%s hadoop jar %s %s %s %s' % (
        hadoop_params_str, jar_file, jar_defs_str, ds['URL'], output_dir)
    print("Executing:")
    print(cmd_str)
    ret = cmd(cmd_str)
    print("exit code = %d" % ret)
    sys.exit(ret)
Example #6
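# Convert Avro input to text with hadoop-streaming: identity mapper, zero
# reducers and AvroAsTextInputFormat, writing uncompressed output to an S3
# working directory.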
def main():
    hr = HadoopRuntime("spec.json")
    settings = hr.settings
    print(settings)

    # allocate output_path, and clean it
    output_path = get_s3_working_dir(settings, "output_path")
    s3_delete(output_path, settings)

    # build parameters for hadoop job
    jar_file = "avro_tools/hadoop-streaming-2.0.0-mr1-cdh4.6.0.jar"
    hadoop_params = {}
    hadoop_params["HADOOP_MAPRED_HOME"] = "/usr/lib/hadoop-mapreduce"
    hadoop_params_str = " ".join(
        ["%s=%s" % (k, v) for k, v in hadoop_params.items()])

    jar_defs = {}
    jar_defs["mapred.job.name"] = "avro-streaming"
    jar_defs["mapred.reduce.tasks"] = "0"
    jar_defs["mapred.output.compress"] = "false"
    jar_defs["fs.s3n.awsAccessKeyId"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_ID
    jar_defs["fs.s3n.awsSecretAccessKey"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_SECRET
    jar_defs["fs.s3.awsAccessKeyId"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_ID
    jar_defs["fs.s3.awsSecretAccessKey"] = '"%s"' % settings.Param.AWS_ACCESS_KEY_SECRET
    jar_defs["mapreduce.framework.name"] = "yarn"
    jar_defs["yarn.resourcemanager.address"] = settings.Param.yarn_resourcemanager
    jar_defs["yarn.resourcemanager.scheduler.address"] = settings.Param.yarn_resourcemanager_scheduler
    jar_defs["fs.defaultFS"] = settings.Param.hdfs_root
    jar_defs_str = " ".join(["-D %s=%s" % (k, v) for k, v in jar_defs.items()])

    other_args = OrderedDict()
    other_args["files"] = "avro_tools/avro-1.7.4-cdh4.5.0.2.jar,avro_tools/avro-mapred-1.7.4-cdh4.5.0.2-hadoop2.jar"
    other_args["libjars"] = "avro_tools/avro-1.7.4-cdh4.5.0.2.jar,avro_tools/avro-mapred-1.7.4-cdh4.5.0.2-hadoop2.jar"
    other_args["mapper"] = "org.apache.hadoop.mapred.lib.IdentityMapper"
    other_args["inputformat"] = "org.apache.avro.mapred.AvroAsTextInputFormat"
    other_args["input"] = settings.Input.avro_path.val
    other_args["output"] = output_path
    other_args_str = " ".join(
        ["-%s %s" % (k, v) for k, v in other_args.items()])

    cmd_str = '%s hadoop jar %s %s %s' % (hadoop_params_str, jar_file,
                                          jar_defs_str, other_args_str)
    print("Executing:")
    print(cmd_str)
    ret = cmd(cmd_str)
    if ret != 0:
        print("Job failed")
        sys.exit(ret)

    settings.Output.output_path.val = output_path
    print("Done")
Example #7
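# Submit a Spark job with spark-submit; the positional arguments passed to
# spark_word_segement.jar are, in order: the jd_dir and rs_dir inputs, the
# match_result and match_analysis output directories, and the white_dict input.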
def main():
    hr = HadoopRuntime()
    settings = hr.settings
    match_result_output_dir = hr.get_hdfs_working_dir("match_result")
    settings.Output.match_result.val = match_result_output_dir
    match_analysis_output_dir = hr.get_hdfs_working_dir("match_analysis")
    settings.Output.match_analysis.val = match_analysis_output_dir

    #SPARK_HOME=/home/run/spark-1.1.0-bin-cdh4
    #/home/run/spark_word_segement.jar
    #    os.system("SPARK_HOME=/home/ansibler/work/spark/spark-1.1.0-bin-cdh4")
    os.system(
        '''SPARK_HOME=/home/run/spark-1.1.0-bin-cdh4 \
&& $SPARK_HOME/bin/spark-submit --class \"com.zetdata.hero.trial.SimpleApp\" \
--master %s \
--num-executors 3 --driver-memory 1024m  --executor-memory 1024m   --executor-cores 1 \
--conf "spark.executor.extraJavaOptions=-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:MaxPermSize=1024m" \
/home/run/spark_word_segement.jar \
%s %s %s %s %s ''' %
        (settings.Param.spark_host, settings.Input.jd_dir.val,
         settings.Input.rs_dir.val, settings.Output.match_result.val,
         settings.Output.match_analysis.val, settings.Input.white_dict.val))
    print("Done")
Example #8
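# Publish an S3 path: set its Content-Type metadata, generate an HTML listing
# for it, and mark it public-read.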
def main():
    hr = HadoopRuntime("spec.json")
    settings = hr.settings
    print(settings)

    s3_path = settings.Input.s3_path.val
    content_type = settings.Param.Metadata_Type

    s3_set_metadata(s3_path, settings, {"Content-Type": content_type})
    list_html_link = s3_make_list(s3_path, settings)
    s3_set_acl(s3_path, settings, {"public-read": None})

    settings.Output.list_html.val = list_html_link

    print("Done")
Example #9
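# Pass-through block: expose the data_path parameter as the hdfs_path output
# without touching HDFS.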
def main():
    hr = HadoopRuntime()
    settings = hr.settings
    settings.Output.hdfs_path.val = settings.Param.data_path

    print("Done")