コード例 #1
0
def correlate(args):
    """Run the correlation jobs selected on the command line.

    Args:
        args: argparse.Namespace with ``force``, ``wig_tool`` and
              ``db_file``; optionally ``limit`` and ``correlation_id``.

    Side effects: executes external wiggletools commands via ``executeCmd``
    and stores each result in the ChipQC SQLite database.
    """
    force = args.force
    tool = args.wig_tool
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)

    # Optional cap on the number of jobs to process (-1 means "no limit").
    limit = -1
    if 'limit' in args and args.limit is not None:
        limit = args.limit

    ## Find the correlations that still need to be computed.
    if 'correlation_id' in args and args.correlation_id is not None:
        # A specific correlation was requested on the command line.
        corrList = _loadSelectedJobs(db,
                                     force=force,
                                     correlations=[args.correlation_id],
                                     limit=limit)
    else:
        corrList = _loadJobList(db, force=force, limit=limit)

    # print-as-function is valid in both Python 2 and 3 and matches the
    # style already used elsewhere in this file.
    print("Processing %s jobs ..." % len(corrList))
    # Callback parameters renamed so they no longer shadow the builtin `id`.
    _processJobs(tool, corrList,
                 lambda corr_id, val: _storeValue(db, corr_id, val),
                 executeCmd)
コード例 #2
0
ファイル: load_samples.py プロジェクト: makostadima/chip-qc
def run(parser, args):
    """Load a sample file list, annotate files, or print the usage help.

    Dispatches on which of ``fl_file`` / ``at_file`` was supplied; when
    neither is present the parser's help text is shown.
    """
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)

    # getattr with a default mirrors the "'x' in args and args.x is not None"
    # test on an argparse Namespace.
    fl_file = getattr(args, 'fl_file', None)
    at_file = getattr(args, 'at_file', None)

    if fl_file is not None:
        load(db, fl_file)
    elif at_file is not None:
        annotateFile(db, at_file)
    else:
        parser.print_help()
コード例 #3
0
ファイル: coverage_mean.py プロジェクト: makostadima/chip-qc
def calculate(args):
    """Compute coverage means for the jobs selected by the arguments.

    Args:
        args: argparse.Namespace with ``force``, ``wig_tool``, ``db_file``,
              ``annot_col``, ``out_dir``, ``genome``, ``chr_exist``,
              ``l_jobs``, ``l_details``; optionally ``limit`` / ``job_id``.

    Side effects: creates ``out_dir`` if missing, updates the job table,
    runs one external command per job and stores the results in the DB.
    """
    force = args.force
    tool = args.wig_tool
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)
    col = args.annot_col
    out_dir = args.out_dir

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    ## Register all jobs derived from the annotation column.
    updateDatabase(db, col, out_dir)

    ## Load all known coverage jobs.
    jobs = db.getCoverage()

    # Optional cap on the number of jobs (-1 means unlimited).
    limit = -1
    if 'limit' in args and args.limit is not None:
        limit = args.limit

    # Restrict to a window of job ids starting at job_id (window size is
    # `limit` when positive, otherwise just the single requested id).
    if 'job_id' in args and args.job_id is not None:
        max_id = args.job_id + (limit if limit > 0 else 1)
        jobs = [r for r in jobs if args.job_id <= r[0] < max_id]

    ## Listing modes: report status and exit without executing anything.
    if args.l_jobs:
        printStatus(db, jobs)
        return
    if args.l_details:
        printStatus(db, jobs, details=True)
        return

    jobs = [r for r in jobs if force or r[3] == 'init']  ## status is init

    print("Processing %s jobs ..." % len(jobs))

    ## Build commands
    cmdIdx = _createCommandIdx(
        db, tool,
        getBedFile(genome=args.genome, prefix=args.chr_exist == "True"), jobs)

    ## Execute each command. The job id is bound as a lambda default so the
    ## callback keeps the right id even if invoked later, and the explicit
    ## loop (unlike map(), which is lazy under Python 3) guarantees the
    ## commands actually run.
    for job_id in cmdIdx:
        executeCmd(cmdIdx[job_id],
                   lambda res, j=job_id: _storeValue(db, j, res))
コード例 #4
0
ファイル: filter_samples.py プロジェクト: makostadima/chip-qc
def run(parser, args):
    """Filter samples against a regulatory build, or skip the filter step.

    Returns 0 on success (errors inside filter() are reported but do not
    change the exit code) and 1 when the regulatory build is missing.
    """
    args.out_dir = os.path.abspath(args.out_dir)
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)

    if args.skip:
        skipFilter(db, args)
        return 0

    # Guard clause: the regulatory build is mandatory unless skipping.
    if getattr(args, 'reg_build', None) is None:
        print("Regulatory build parameter required ")
        return 1

    if filter(db, args) != 0:
        print("Error: there were errors during execution !!!")
    return 0
コード例 #5
0
def runScreenshots(args):
    """Generate browser screenshots for the selected file jobs.

    Args:
        args: argparse.Namespace with ``db_file``, ``out_dir``, ``bed_file``,
              ``rel_37`` and ``l_jobs``; optionally ``limit`` / ``job_id``.

    Side effects: creates ``out_dir`` if missing, runs one external R
    command per job via ``executeCmd`` and stores results in the DB.
    """
    out_dir = args.out_dir
    r_file = getRFile()
    bed_file = os.path.abspath(args.bed_file)
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    jobs = db.getFiles()

    # Optional cap on the number of jobs (-1 means unlimited).
    limit = -1
    if 'limit' in args and args.limit is not None:
        limit = args.limit

    # Restrict to a window of job ids starting at job_id (window size is
    # `limit` when positive, otherwise just the single requested id).
    if 'job_id' in args and args.job_id is not None:
        max_id = args.job_id + (limit if limit > 0 else 1)
        jobs = [r for r in jobs if args.job_id <= r[0] < max_id]

    ## Listing mode: print job status and exit without executing anything.
    if args.l_jobs:
        printStatus(db, jobs)
        return

    print("Processing %s jobs ..." % len(jobs))

    ## Build commands
    cmdIdx = _createCommandIdx(db,
                               r_file,
                               out_dir,
                               bed_file,
                               jobs,
                               grch37=args.rel_37)

    ## Execute each command. The job id is bound as a lambda default so the
    ## callback keeps the right id, and the explicit loop (unlike map(),
    ## which is lazy under Python 3) guarantees the commands actually run.
    for job_id in cmdIdx:
        executeCmd(cmdIdx[job_id],
                   lambda res, j=job_id: _storeValue(db, j, res))
    return None
コード例 #6
0
def run(parser, args):
    """Print one of the available id/report listings from the database.

    Dispatches on the first truthy listing flag in args. Returns 0 on
    success, or 1 (after printing the help text) when no option was given.
    """
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)
    if args.f_file:
        filterIds(db)
    elif args.f_file_detail:
        filterDetailsIds(db)
    elif args.l_file:
        loadedIds(db)
    elif args.la_file:
        loadedAnnotIds(db)
    elif args.c_list:
        correlationIds(db)
    elif args.C_list:
        correlationSampleIds(db)
    elif 'detail_id' in args and args.detail_id is not None:
        correlationDetails(db, args.detail_id)
    else:
        # print-as-function is valid in both Python 2 and 3 and matches the
        # style already used elsewhere in this file.
        print("One option required!!!")
        parser.print_help()
        return 1
    return 0
コード例 #7
0
def analyseEnrichment(args):
    """Run the enrichment analysis for the selected jobs.

    Args:
        args: argparse.Namespace with ``db_file``, ``force``, ``out_dir``,
              ``annot_col``, ``annot_cnt``, ``l_jobs``, ``l_details``;
              optionally ``limit`` / ``job_id``.

    Side effects: creates ``out_dir`` if missing, updates the job table,
    runs one external command per job and stores the results in the DB.
    """
    force = args.force
    out_dir = args.out_dir
    col = args.annot_col
    countAnnot = args.annot_cnt
    r_file = getRFile()
    db = chipqc_db.ChipQcDbSqlite(path=args.db_file)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    ## Register all jobs derived from the annotation column.
    updateDatabase(db, col, out_dir)

    jobs = db.getEnrichment()

    # Optional cap on the number of jobs (-1 means unlimited).
    limit = -1
    if 'limit' in args and args.limit is not None:
        limit = args.limit

    # Restrict to a window of job ids starting at job_id (window size is
    # `limit` when positive, otherwise just the single requested id).
    if 'job_id' in args and args.job_id is not None:
        max_id = args.job_id + (limit if limit > 0 else 1)
        jobs = [r for r in jobs if args.job_id <= r[0] < max_id]

    ## Listing modes: print the status and exit without executing anything.
    if args.l_jobs:
        printStatus(db, countAnnot, jobs, details=False)
        return
    elif args.l_details:
        printStatus(db, countAnnot, jobs, details=True)
        return

    jobs = [r for r in jobs if force or r[4] == 'init']  ## status is init

    print("Processing %s jobs ..." % len(jobs))

    ## Build commands
    cmdIdx = _createCommandIdx(db, r_file, countAnnot, jobs)

    ## Execute each command. The job id is bound as a lambda default so the
    ## callback keeps the right id, and the explicit loop (unlike map(),
    ## which is lazy under Python 3) guarantees the commands actually run.
    for job_id in cmdIdx:
        executeCmd(cmdIdx[job_id],
                   lambda res, j=job_id: _storeValue(db, j, res))

    ## TODO: compute per-BIN mean values for INPUT and IP files, e.g.
    ##   wiggletools apply_paste mean.txt meanI regions.chr.bed BW
    ## then run the enrichment R script ("Rscript <r_file> --db <db_file>
    ## --out <out_dir>") to produce the plot/result file and load the
    ## results back into the database.
    return None