Example #1
    def list(self, request):
        action = request.GET.get('action')
        hadoop_settings = settings.get_hadoop_settings(["paths", "actions"])

        if action == 'check':
            running, stopped = check_hadoop_services(True)
            num, output = check_hadoop_applications()
            return Response(
                {
                    "running_services": running,
                    "stopped_services": stopped,
                    "number_of_running_applications": num,
                    "running_applications": output
                },
                status=status.HTTP_200_OK)
        if action == 'restart':
            # A restart is a stop followed by a start: run the stop script
            # here, then fall through to the generic handler below as 'start'.
            restart = hadoop_settings["paths"]["hadoop_home"] + hadoop_settings[
                "paths"]["sbin"] + "/" + hadoop_settings["actions"]['stop']
            output = run_command(restart, True)
            action = 'start'

        action = hadoop_settings["paths"]["hadoop_home"] + hadoop_settings[
            "paths"]["sbin"] + "/" + hadoop_settings["actions"][action]
        output = run_command(action, True)

        return Response({"output": output}, status=status.HTTP_200_OK)
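These views call a project-local run_command helper that is not shown. A minimal sketch of what it might look like, assuming the second positional argument toggles shell execution and the return value is the command's combined output (the project's real implementation may differ):

import subprocess

def run_command(command, shell=False):
    # Hypothetical sketch: run a command string and return combined
    # stdout/stderr as text.
    result = subprocess.run(command, shell=shell,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            universal_newlines=True)
    return result.stdout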
Example #2
    def create(self, request):
        action = request.GET.get('action')
        hadoop_settings = settings.get_hadoop_settings(["paths", "actions"])

        if action == 'upload':
            path = request.data.get("path")
            dest = request.data.get("dest")
            files = request.FILES
            print(files)
            # Try to upload
            try:
                action = hadoop_settings["paths"][
                    "hadoop_home"] + hadoop_settings["paths"][
                        "bin"] + "/hadoop " + hadoop_settings["actions"][
                            action] + " " + path + " " + dest
                run_command(action)
            except Exception:
                raise CustomRequestFailed(
                    "Something went wrong while trying to upload.")
        elif action == 'run':
            path = request.data.get("path")
            args = request.data.get("args")
            # Try to run
            try:
                action = hadoop_settings["paths"][
                    "hadoop_home"] + hadoop_settings["paths"][
                        "bin"] + "/hadoop " + hadoop_settings["actions"][
                            action] + " " + path + " " + args
                run_command(action)
            except Exception:
                raise CustomRequestFailed(
                    "Something went wrong while trying to run the job.")
        return Response(status=status.HTTP_200_OK)
Example #3
def build_project(project_dir, workspace, scheme):
    log(f"Installing CocoaPods for '{os.path.join(project_dir, workspace)}'")
    rv, out, err = run_command(["pod", "install", "--verbose"],
                               working_dir=project_dir)
    if rv != 0:
        log(f"Failed to install CocoaPod for '{project_dir}; output={out}, error={err}'"
            )
        return rv

    log(f"Building project '{os.path.join(project_dir, workspace)}'")
    build_command = [
        "xcodebuild",
        "build",
        "-workspace",
        workspace,
        "-scheme",
        scheme,
        "-sdk",
        "iphonesimulator",
    ]

    rv, out, err = run_command(build_command, working_dir=project_dir)
    if rv != 0:
        log(f"Failed to build '{project_dir}; output={out}, error={err}'")

    return rv
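Here run_command returns a (returncode, stdout, stderr) triple and accepts a working_dir keyword. A minimal sketch under that assumption:

import subprocess

def run_command(cmd, working_dir=None):
    # Hypothetical sketch of the (rv, out, err) variant used above.
    proc = subprocess.run(cmd, cwd=working_dir,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.returncode, proc.stdout, proc.stderr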
Example #4
def create_archive(framework, project_file, build_for_device):
    if build_for_device:
        archive_path = f"{IOS_DEVICE_ARCHIVE_PATH}{framework}" 
        destination = "generic/platform=iOS"
    else:
        archive_path = f"{IOS_SIMULATOR_ARCHIVE_PATH}{framework}"
        destination = "generic/platform=iOS Simulator"
    cmd = [
        "xcodebuild",
        "archive",
        "-project",
        project_file,
        "-scheme",
        framework,
        "-destination",
        destination,
        "-archivePath",
        archive_path,
        "SKIP_INSTALL=NO",
        "BUILD_LIBRARY_FOR_DISTRIBUTION=YES"
    ]
    
    (exit_code, out, err) = run_command(cmd, keepalive_interval=300, timeout=7200)
    if exit_code == 0:
        log(f"Created iOS archive {framework} {destination}")
    else:
        log(f"Could not create xcodebuild archive: {framework} output: {out}; error: {err}")
        sys.exit(exit_code)
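The keepalive_interval and timeout parameters suggest a helper built for CI, where a long but quiet xcodebuild run can be killed by the build system. A sketch of how such a helper could work (an assumption, not the project's actual code):

import subprocess
import threading

def run_command(cmd, keepalive_interval=None, timeout=None):
    # Run cmd, emitting a heartbeat every `keepalive_interval` seconds so a
    # CI runner does not abort a long-running but silent step.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stop = threading.Event()
    if keepalive_interval:
        def heartbeat():
            while not stop.wait(keepalive_interval):
                print("still running...", flush=True)
        threading.Thread(target=heartbeat, daemon=True).start()
    try:
        out, err = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        out, err = proc.communicate()
    finally:
        stop.set()
    return proc.returncode, out, err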
Example #5
def get_php_configs(ssh_path, user, host, debug=False):
    cmd = '"{0}" -ssh {1}@{2} "/usr/bin/php -i | grep \'upload_max_filesize\|post_max_size\'"'.format(ssh_path, user, host)
    if debug:
        print(cmd)
        
    output, error = functions.run_command(cmd, printError=debug)

    if error:
        return {'error': error}

    d = {}
    for var in output.strip().split('\n'):
        vals = var.split(' => ')
        d[vals[0]] = vals[1]

    return d
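A hypothetical invocation, for illustration only (the plink path, user, and host below are made up):

configs = get_php_configs(r"C:\tools\plink.exe", "deploy",
                          "web01.example.com", debug=True)
if 'error' not in configs:
    print(configs.get('upload_max_filesize'), configs.get('post_max_size'))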
Example #6
def map_framework_to_project(framework_list):
    framework_map = {}
    cmd = [
        "xcodebuild",
        "-project",
        "AWSiOSSDKv2.xcodeproj",
        "-list",
    ]
    (exit_code, out, err) = run_command(cmd, keepalive_interval=300, timeout=7200)
    if exit_code == 0:
        log(f"List of schema found")
    else:
        log(f"Xcodebuild list failed: output: {out}; error: {err}")
        sys.exit(exit_code)

    for framework in framework_list:
        if framework not in str(out):
            framework_map[framework] = "./AWSAuthSDK/AWSAuthSDK.xcodeproj"
        else:
            framework_map[framework] = "AWSiOSSDKv2.xcodeproj"
    return framework_map
Example #7
def create_checksum(archive_path, spm_manifest_repo, version):
    framework_to_checksum = {}
    for framework in xcframeworks:
        final_archive_name_with_ext = f"{framework}-{version}.zip"
        zipfile_path = os.path.join(archive_path, final_archive_name_with_ext)
        cmd = [
            "swift", "package", "--package-path", spm_manifest_repo,
            "compute-checksum", zipfile_path
        ]

        (exit_code, out, err) = run_command(cmd,
                                            keepalive_interval=300,
                                            timeout=7200)
        if exit_code == 0:
            logging.info(f"Created check sum for archive {framework} {out}")
        else:
            logging.error(
                f"Could not create checksum for archive: {framework} output: {out}; error: {err}"
            )
            sys.exit(exit_code)
        framework_to_checksum[framework] = out.decode("utf-8").rstrip()
    return framework_to_checksum
Example #8
    if os.path.exists(xcframework):
        log(f"skipping {framework}...")
    else:
        log(f"Creating XCF for {framework}")
        
        cmd = [
            "xcodebuild",
            "-create-xcframework",
            "-framework",
            ios_device_framework,
            "-debug-symbols",
            ios_device_debug_symbols,
            "-framework",
            ios_simulator_framework,
            "-debug-symbols",
            ios_simulator_debug_symbols,
            "-output",
            xcframework,
        ]
        (exit_code, out, err) = run_command(cmd, keepalive_interval=300, timeout=7200)
        if exit_code == 0:
            log(f"Created XCFramework for {framework}")
        else:
            log(f"Could not create XCFramework: {framework} output: {out}; error: {err}")
            sys.exit(exit_code)

if os.path.exists(IOS_DEVICE_ARCHIVE_PATH):
    shutil.rmtree(IOS_DEVICE_ARCHIVE_PATH)
if os.path.exists(IOS_SIMULATOR_ARCHIVE_PATH):
    shutil.rmtree(IOS_SIMULATOR_ARCHIVE_PATH)
Example #9
def process(bedgraph, junc, genome, output, strand, minjxncount, min_percent,
            min_reads, window_size):
    # === calculate window read counts. incorporate introns if --junc provided. ===
    gc_strand, outfile_temp_unionbg = ".", None  # @2020.10.10 by Zhang yiming - init variable
    if junc:
        if re.search(r'\.bed$', junc):
            logger.info('converting junction .bed file to .bedgraph {}',
                        str(datetime.now().time()))

            # --- get overlapping junction regions ---
            temp_jxn_intersect = output + '.temp.junction.intersect.txt'
            if strand == 0:
                temp_jxn_file = junc
                pb.BedTool(junc).genome_coverage(bg=True, g=genome).intersect(
                    wo=True, b=temp_jxn_file).saveas(temp_jxn_intersect)
            else:
                if strand == 1:
                    gc_strand = '+'
                elif strand == -1:
                    gc_strand = '-'

                # get this strand's junctions only & filter junctions by read counts
                temp_jxn_file = output + '.' + str(strand) + '.jxn.bed'
                o = open(temp_jxn_file, 'w')
                with open(junc, 'r') as f:
                    for line in f:
                        # Unpack into a local name: reusing `strand` here
                        # would clobber the integer parameter that is checked
                        # again further down.
                        (chrom, start, end, name, cov,
                         jxn_strand) = line.rstrip().split('\t')
                        if jxn_strand == gc_strand and int(cov) >= minjxncount:
                            o.write(line)
                o.close()

                # get junction genomecov & intersect to get the # reads per genomecov region
                pb.BedTool(junc).genome_coverage(
                    bg=True, g=genome, strand=gc_strand).intersect(
                        wo=True, b=temp_jxn_file).saveas(temp_jxn_intersect)

            # --- get bedgraph of junction regions ---
            jxn2cov = {}
            with open(temp_jxn_intersect, 'r') as f:
                for line in f:
                    if strand == 0:
                        (cchrom, cstart, cend, ccov, jchrom, jstart, jend,
                         jname, jcov, overlap) = line.rstrip().split('\t')
                    else:
                        (cchrom, cstart, cend, ccov, jchrom, jstart, jend,
                         jname, jcov, jstrand,
                         overlap) = line.rstrip().split('\t')

                    jxn = ':'.join([cchrom, cstart, cend])
                    if jxn not in jxn2cov:
                        jxn2cov[jxn] = int(jcov)
                    else:
                        jxn2cov[jxn] += int(jcov)

            outfile_temp_jxn_bg_unsorted = output + '.' + str(
                strand) + '.jxn.bedgraph.unsorted'
            o = open(outfile_temp_jxn_bg_unsorted, 'w')
            for jxn in jxn2cov:
                o.write('\t'.join(jxn.split(':')) + '\t' + str(jxn2cov[jxn]) +
                        '\n')
            o.close()

            outfile_temp_jxn_bg = output + '.' + str(strand) + '.jxn.bedgraph'
            sort_bedfile(outfile_temp_jxn_bg_unsorted, outfile_temp_jxn_bg)

            # --- cleanup ---
            os.remove(temp_jxn_intersect)
            os.remove(outfile_temp_jxn_bg_unsorted)
            if strand != 0:
                os.remove(temp_jxn_file)

        elif re.search(r'\.bedgraph$', junc):
            outfile_temp_jxn_bg = junc
        else:
            logger.error(
                'EXIT: File extension not recognized. Please input .bed or .bedgraph --junc file'
            )
            sys.exit(1)

        # combine bedgraphs: using subprocess because pybedtools complains about unionbedg format not being bed
        logger.info('merging junctions with bedgraph {}',
                    str(datetime.now().time()))
        outfile_temp_unionbg = output + '.unionbedg'
        j = open(outfile_temp_unionbg, 'w')
        cmd = ['bedtools', 'unionbedg', '-i', bedgraph, outfile_temp_jxn_bg]
        stdout, stderr = run_command(cmd, stdoutfile=j)
        j.close()
        os.remove(outfile_temp_jxn_bg)

        # write output
        outfile_temp_merge_bg_jxn = output + '.bg_jxn.bedgraph'
        m = open(outfile_temp_merge_bg_jxn, 'w')
        with open(outfile_temp_unionbg, 'r') as f:
            for line in f:
                (chrom, start, end, count1, count2) = line.split('\t')
                # unionbedg may emit counts in scientific notation (e.g. 1.5e+06)
                if '+' in count1 or '+' in count2:
                    count1 = scientific2int(count1) if '+' in count1 else int(
                        count1)
                    count2 = scientific2int(count2) if '+' in count2 else int(
                        count2)
                m.write('\t'.join(
                    map(str, [chrom, start, end,
                              int(count1) + int(count2)])) + '\n')
        m.close()

        logger.info('calculating window read counts: {}',
                    str(datetime.now().time()))
        window2count_length, chr2starts, chr2ends = window_count(
            outfile_temp_merge_bg_jxn, window_size)
    else:
        logger.error(
            'No --junc file was input. It is recommended to include this input.'
        )

        logger.info('calculating window read counts: {}',
                    str(datetime.now().time()))
        window2count_length, chr2starts, chr2ends = window_count(
            bedgraph, window_size)
        outfile_temp_merge_bg_jxn = bedgraph

    logger.info('{} total windows {}', len(window2count_length.keys()),
                str(datetime.now().time()))

    # === merge consecutive windows ===
    logger.info('merging consecutive windows: {}', str(datetime.now().time()))
    outfile_temp_windows = output + '.windows'
    outfile_temp_bg_merged = output + '.bg.merged'
    transcripts_trimmed = merge_windows(window2count_length, min_percent,
                                        min_reads, outfile_temp_merge_bg_jxn,
                                        outfile_temp_bg_merged,
                                        outfile_temp_windows)

    if transcripts_trimmed != 0:
        logger.info('total transcripts after merging {}',
                    len(transcripts_trimmed))

        # === get strand ===
        if strand == 1:
            strand = "+"
        elif strand == -1:
            strand = "-"
        elif strand == 0:
            strand = 0
        else:
            logger.error('DIED: did not recognize strand {}', strand)
            sys.exit()

        # === print output -> unsorted bed format ===
        logger.info("printing output (unsorted): {}",
                    str(datetime.now().time()))
        transcripts_trimmed_list = list(sorted(transcripts_trimmed.keys()))

        o = open(output, 'w+')
        for ind, tx in enumerate(transcripts_trimmed_list):
            (chrom, start, end) = tx
            # if not chrom.startswith('chr'):
            # 	chrom = 'chr' + chrom
            if strand != 0:
                o.write('\t'.join([
                    chrom,
                    str(start),
                    str(end), 'tx_' + str(ind), '0', strand
                ]) + '\n')
            else:
                o.write('\t'.join(
                    [chrom, str(start),
                     str(end), 'tx_' + str(ind)]) + '\n')
        o.close()

        # === output: sorted bed format & delete the temp unsorted file ===
        sort_bedfile(output, output, sort_by_bedtools=True)

    # === clean up ===
    os.remove(outfile_temp_windows)
    if junc:
        if outfile_temp_unionbg:
            os.remove(outfile_temp_unionbg)
        os.remove(outfile_temp_merge_bg_jxn)
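The loop over the unionbedg output calls a scientific2int helper that is not shown here. A minimal sketch, assuming it only needs to turn scientific-notation counts (e.g. '1.5e+06') into integers:

def scientific2int(value):
    # "1.5e+06" -> 1500000; plain integer strings pass through unchanged
    return int(float(value))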
Example #10
user = sys.argv[1]
token = sys.argv[2]
title = sys.argv[3]
body = sys.argv[4]
base = sys.argv[5]
head = sys.argv[6]
github_org = sys.argv[7]
repo = sys.argv[8]

log(f"Creating pull request for {repo}")

auth_user = f"{user}:{token}"

data = f'{{"title":"{title}","base":"{base}","head":"{head}", "body":"{body}"}}'
log(f"Pull request data: {data}")

repo_url = f"https://api.github.com/repos/{github_org}/{repo}/pulls"

exit_code, out, err = run_command([
    "curl", "--user", auth_user, "--request", "POST", "--data", data, repo_url
])

if exit_code == 0:
    log(f"Created pull request for {repo}")
else:
    log(f"Could not create pull request for {repo}: output: {out}; error: {err}"
        )

sys.exit(exit_code)
Example #11
import datetime
import sys

from functions import log, run_command

awsprofile = sys.argv[1]
distribution_id = sys.argv[2]
path = sys.argv[3]
invalidation = '{{"Paths":{{"Quantity":1,"Items":["/{0}"]}},"CallerReference":"{0}{1}"}}'.format(
    path, datetime.datetime.now())

exit_code, out, err = run_command(
    ["aws", "configure", "set", "preview.cloudfront", "true"])
if exit_code != 0:
    log(f"Failed to configure preview.cloudfront; output={out}, error={err}'")
    sys.exit(exit_code)

exit_code, out, err = run_command([
    "aws",
    "cloudfront",
    "create-invalidation",
    "--distribution-id",
    distribution_id,
    "--invalidation-batch",
    invalidation,
    "--profile",
    awsprofile,
])

if exit_code != 0:
    log(f"Failed to create invalidation; output={out}, error={err}'")
Example #12
import sys

from framework_list import frameworks
from functions import log, run_command

log("Publishing CocoaPods")

for framework in frameworks:
    log(f"Publishing {framework}")

    # Most pods take a few minutes to build, and a few seconds to push to trunk. However, the
    # AWSiOSSDK podspec can take a long time to build, since it builds each dependent pod as
    # part of its linting process, so set the timeout accordingly.
    (exit_code, out, err) = run_command(
        [
            "bundle", "exec", "pod", "trunk", "push", f"{framework}.podspec",
            "--allow-warnings", "--synchronous"
        ],
        keepalive_interval=300,
        timeout=3600,
    )

    if exit_code != 0 and "Unable to accept duplicate entry for" in str(out):
        log(f"Already published {framework}")
    elif exit_code == 0:
        log(f"Published {framework}")
    else:
        log(f"Could not publish {framework}: output: {out}; error: {err}")
        sys.exit(exit_code)

    if framework == "AWSCore":
        log(f"pod repo update after {framework}")
        (exit_code, out, err) = run_command(
            # Assumed completion -- the original snippet is truncated here.
            # Updating the local spec repo lets subsequent pushes resolve the
            # just-published AWSCore.
            ["bundle", "exec", "pod", "repo", "update"],
            keepalive_interval=300,
            timeout=3600,
        )
Example #13
import os
import sys
from shutil import copyfile

from functions import log, run_command

root = sys.argv[1]
dest = sys.argv[2]
files = {
    "LICENSE": "LICENSE",
    "LICENSE.APACHE": "LICENSE.APACHE",
    "NOTICE": "NOTICE",
    "README.md": "README.md",
    "CircleciScripts/src/README.html": "src/source.html",
    "CircleciScripts/samples/README.html": "samples/samples.html",
}

for source, target in files.items():
    s = os.path.join(root, source)
    t = os.path.join(dest, target)
    target_dir = os.path.dirname(t)
    exit_code, out, err = run_command(["mkdir", "-p", target_dir])
    if exit_code != 0:
        log(f"Failed to make directory '{target_dir}'; output={out}, error={err}"
            )

    copyfile(s, t)
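Shelling out to mkdir -p works on the POSIX hosts these scripts run on; a variant of the same loop using the portable standard-library call instead, if the dependency on an external command is unwanted:

import os
from shutil import copyfile

for source, target in files.items():
    s = os.path.join(root, source)
    t = os.path.join(dest, target)
    os.makedirs(os.path.dirname(t), exist_ok=True)  # same effect as `mkdir -p`
    copyfile(s, t)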