Example #1
def display_map_coot(inp1, inp2):
    ''' display map by coordinate & SF file.
    '''

    mtz, xyz_inp, sf_inp = get_file_type(inp1, inp2)

    if not xyz_inp or not sf_inp:
        print('Error: File must be in MTZ or an mmCIF file having _refln.pdbx_FWT\n')
        sys.exit()

    fp = open(sf_inp, 'r')
    sf_new = sf_inp + '__new'
    fw = open(sf_new, 'w')
    n = 0
    for x in fp:
        if '_refln.pdbx_r_free_flag' in x:  #change it to bypass a bug in CAD.
            fw.write('%s_x\n' % (x.rstrip()))
        else:
            if '_refln.pdbx_FWT' in x: n = n + 1
            fw.write(x)
    fp.close()
    fw.close()

    add = ''
    if n > 0:  # a cif file with pdbx_FWT
        sf = prog.sf_convertor(xyz_inp, sf_new, 'mtz', add)
        arg = 'coot --pdb %s --mtz %s' % (xyz_inp, sf)
        os.system(arg)
    else:
        print('Error: SF file should be an mmCIF file having _refln.pdbx_FWT.')

    util.delete_file(sf_new, sf_new + '.mtz', 'sf_information.cif',
                     'sf_format_guess.text')
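Nearly every example on this page calls util.delete_file with one or more file names and relies on missing files being ignored (Example #4 below even calls it just to "ensure destination doesn't exist"). As a point of reference, here is a minimal sketch of such a helper, assuming a variadic signature that silently skips paths that do not exist; the real helpers in these projects may differ (Examples #6 and #32 clearly use other signatures):

import os

def delete_file(*paths):
    '''Minimal sketch: remove each named file if present, ignore the rest.'''
    for p in paths:
        if p and os.path.isfile(p):
            os.remove(p)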
Example #2
File: ligand.py Project: cypreeti/dcc
def cut_map_around_xyz(mapfile, peppdb_in, pepid):
    '''use several CCP4 utilities to cut the map around the selected molecule
    color: [x800080] [xc00000] [xb0b0b0]
    '''

    mapout = pepid + '_cut.map'
    util.delete_file(mapout)
    '''
    xyzlim,xyzcomp=find_xyzlim_compound(pepid, peppdb_in)  
    mapscr=cut_map_bylimit(xyzlim)
    arg = mapfile + ' ' + ' ' +  mapout
    command="chmod +x %s ; ./%s  " %(mapscr, mapscr) + arg 
    os.system(command)
    
    return  mapout
    '''

    peppdb = peppdb_in
    if util.is_cif(peppdb_in): peppdb = cif.cif2pdb(peppdb_in)

    mapscr = cut_map_scr()
    arg = mapfile + ' ' + peppdb + ' ' + mapout
    command = "chmod +x %s ; ./%s  " % (mapscr, mapscr) + arg
    os.system(command)

    if util.is_cif(peppdb_in):
        util.delete_file('%s.PDB' % peppdb_in)
        peppdb = peppdb_in

    return mapout
Example #3
File: ligand.py Project: cypreeti/dcc
def map_info(mapfile):
    '''get the min, max, mean, sigma from the map
    '''

    min, max, mean, sigma = '-1', '-1', '-1', '-1'
    log = mapfile + '_header'
    scr = mapfile + '.sh'

    arg = 'mapdump mapin %s <<eof >%s \neof\n' % (mapfile, log)

    fw = open(scr, 'w')
    fw.write(arg)
    fw.close()
    os.system('chmod +x %s ; ./%s' % (scr, scr))

    if not util.check_file(10, log): return min, max, mean, sigma
    fp = open(log, 'r')
    for x in fp:
        if 'Minimum density ...' in x:
            min = x.rstrip().split(' ')[-1]
        elif 'Maximum density ...' in x:
            max = x.rstrip().split(' ')[-1]
        elif '    Mean density ..' in x:
            mean = x.rstrip().split(' ')[-1]

        elif 'deviation from mean density ..' in x:
            sigma = x.rstrip().split(' ')[-1]

    fp.close()
    util.delete_file(scr, log)

    return min, max, mean, sigma
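map_info drives the CCP4 mapdump program by writing a tiny shell script and scraping its log. A minimal sketch of the same header parse done through subprocess instead of a script file, assuming mapdump is on PATH and prints the usual "Minimum density", "Maximum density", "Mean density" and "Rms deviation from mean density" lines:

import subprocess

def map_info_via_subprocess(mapfile):
    '''Minimal sketch: read min/max/mean/sigma from mapdump's header listing.'''
    out = subprocess.run(['mapdump', 'mapin', mapfile],
                         input='\n', capture_output=True, text=True).stdout
    stats = {'min': '-1', 'max': '-1', 'mean': '-1', 'sigma': '-1'}
    for line in out.splitlines():
        if 'Minimum density' in line:
            stats['min'] = line.split()[-1]
        elif 'Maximum density' in line:
            stats['max'] = line.split()[-1]
        elif 'deviation from mean density' in line:
            stats['sigma'] = line.split()[-1]
        elif 'Mean density' in line:
            stats['mean'] = line.split()[-1]
    return stats['min'], stats['max'], stats['mean'], stats['sigma']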
Example #4
def zip_one_file(dir, to_pack, zip_name):
    verify_path_exists(dir)
    # for the benefit of pigz, we have to cd to the directory, because
    # we don't control the name of the file inside created zip file - it's
    # the same as path of the file we're compressing
    curr_dir = os.getcwd()
    os.chdir(dir)
    verify_path_exists(to_pack)
    util.delete_file(zip_name)  # ensure destination doesn't exist
    try:
        # -11 for zopfli compression
        # --keep to not delete the source file
        # --zip to create a single-file zip archive
        # we can't control the name of the file pigz will create, so rename
        # to desired name after it's created
        pigz_dst = to_pack + ".zip"
        util.delete_file(pigz_dst)
        run_cmd_throw("pigz", "-11", "--keep", "--zip", to_pack)
        print("Compressed using pigz.exe")
        if pigz_dst != zip_name:
            print("moving %s => %s" % (pigz_dst, zip_name))
            shutil.move(pigz_dst, zip_name)
    except:
        # if pigz.exe is not in path, use regular zip compression
        zip_file(zip_name, to_pack, to_pack, compress=True)
        print("Compressed using regular zip")
    verify_path_exists(zip_name)
    os.chdir(curr_dir)
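When pigz is not available, zip_one_file falls back to zip_file(zip_name, to_pack, to_pack, compress=True). That helper is not shown here; a plausible sketch, assuming it writes a single-entry zip archive with the given member name (the signature is inferred from the call above, not taken from the project source):

import zipfile

def zip_file(zip_name, file_path, name_in_zip, compress=True):
    '''Minimal sketch: pack one file into a zip archive.'''
    method = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    with zipfile.ZipFile(zip_name, 'w', method) as zf:
        zf.write(file_path, arcname=name_in_zip)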
Example #5
def delete_question(question_id=None):
    try:
        user_id = data_manager.get_userid_by_username(session['username'])
    except KeyError:
        return redirect('/')
    if user_id == data_manager.get_foreign_key_by_id(
            data_manager.question_db, 'users_id', question_id)[0]['users_id']:
        data_manager.delete_line_by_foreign_id(data_manager.comment_db,
                                               'question_id', question_id)
        filename = 'static/image_for_question' + str(question_id) + '.png'
        if util.check_file(filename):
            util.delete_file(filename)
        answer_ids_to_delete = data_manager.get_ids_by_foreign_id(
            data_manager.answer_db, 'question_id', question_id)
        for answer_id in answer_ids_to_delete:
            data_manager.delete_line_by_foreign_id(data_manager.comment_db,
                                                   'answer_id',
                                                   answer_id['id'])
            filename = 'static/image_for_answer' + str(
                answer_id['id']) + '.png'
            if util.check_file(filename):
                util.delete_file(filename)
        data_manager.delete_answer_by_question_id(question_id)
        data_manager.delete_question(question_id)
    else:
        flash('Invalid user')
    return redirect('/list')
Example #6
 def decode(self, message):
     mask = int(message[0])  # mask
     filename = message[2]  # file name
     path = message[3]  # path
     if mask == notify.CREATE_DIR:  # for a create-folder message, call the folder-creation helper, and so on for every mask
         util.create_folder(path, filename)
     elif mask == notify.DELETE_DIR:
         util.delete_folder(path, filename)
     elif mask == notify.CREATE_FILE:
         util.create_file(path, filename)
     elif mask == notify.DELETE_FILE:
         util.delete_file(path, filename)
     elif mask == notify.MODIFY_FILE:
         BkpSync.flag_send = 1
         time.sleep(0.5)
         util.modify_file(path, filename, self.conn)
         time.sleep(1)
         BkpSync.flag_send = 0
     elif mask == notify.DIR_MOVED_FROM:
         util.delete_folder(path, filename)
     elif mask == notify.DIR_MOVED_TO:
         util.create_folder(path, filename)
     elif mask == notify.FILE_MOVED_FROM:
         util.delete_file(path, filename)
     elif mask == notify.FILE_MOVE_TO:
         BkpSync.flag_send = 1
         time.sleep(0.5)
         util.modify_file(path, filename, self.conn)
         time.sleep(1)
         BkpSync.flag_send = 0
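The if/elif chain above simply routes each notification mask to a util call. The same routing can be written with a dispatch table; a sketch that reuses the names from the snippet (notify, util, time, BkpSync and self.conn are assumed to be available exactly as above):

def decode_via_table(self, message):
    '''Minimal sketch: dispatch-table version of decode().'''
    mask, filename, path = int(message[0]), message[2], message[3]
    handlers = {
        notify.CREATE_DIR: util.create_folder,
        notify.DELETE_DIR: util.delete_folder,
        notify.DIR_MOVED_TO: util.create_folder,
        notify.DIR_MOVED_FROM: util.delete_folder,
        notify.CREATE_FILE: util.create_file,
        notify.DELETE_FILE: util.delete_file,
        notify.FILE_MOVED_FROM: util.delete_file,
    }
    if mask in handlers:
        handlers[mask](path, filename)
    elif mask in (notify.MODIFY_FILE, notify.FILE_MOVE_TO):
        BkpSync.flag_send = 1
        time.sleep(0.5)
        util.modify_file(path, filename, self.conn)
        time.sleep(1)
        BkpSync.flag_send = 0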
Example #8
File: ligand.py Project: cypreeti/dcc
def isolate_connect_ligand(pdb, k, v):
    '''separate isolated and connected ligands
    pdb: the list of coordinate (all atoms)
    k: the chainID;  v: the list of residue numbers. 
    '''

    if len(v) == 1: return [v]

    idd = '%s_%s_all' % (k, v[0])
    natom, pdb_lig = get_subpdb(pdb, k, v, idd)

    tmp = [v[0]]
    for i, x in enumerate(v):
        if i == 0: continue
        n1, n2 = i - 1, i
        nc = connect(pdb_lig, n1, n2, k, v)
        if not nc: tmp.append(99999)
        tmp.append(x)

    ss = ''
    for x in tmp:
        ss = ss + ' %d' % x
    t1 = (ss.split('99999'))
    nres_list = []
    for x in t1:
        tt = x.split()
        nres_list.append([int(i) for i in tt])

    util.delete_file(pdb_lig)
    #    print(nres_list)
    return nres_list
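isolate_connect_ligand marks chain breaks by inserting 99999 into the list and then splits a space-joined string to recover the groups of connected residues. A small sketch of the same grouping done directly on the list (assuming residue numbers never equal the sentinel, as the original also assumes):

def split_on_sentinel(numbers, sentinel=99999):
    '''Minimal sketch: split a flat list into sublists at each sentinel value.'''
    groups, current = [], []
    for n in numbers:
        if n == sentinel:
            if current:
                groups.append(current)
            current = []
        else:
            current.append(n)
    if current:
        groups.append(current)
    return groups

# e.g. split_on_sentinel([5, 6, 99999, 12]) == [[5, 6], [12]]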
Example #9
File: ligand.py Project: cypreeti/dcc
def map_around_compound(mapfile, coord, compid):
    '''cut the ASU map to the residue level.
    compid:  model_compound_chainID_resnumber_alt_insertion.
    mapfile: the CCP4 map in ASU.
    coord:   the coordinate file for the map (in cif/pdb)
    '''

    if (not util.check_file(100, mapfile) or not util.check_file(100, coord)):
        err = 'Error: Either mapfile or coordinate file does not exist.'
        config.ERRLOG.append(err)
        print(err)
        return

    xyzlim, xyzcomp = find_xyzlim_compound(compid, coord)

    if (len(xyzlim.strip()) < 2):
        err = 'Error: compound boundary (in fractional coordinates) not found; check the compound ID.'
        config.ERRLOG.append(err)
        print(err)
        return

# below is cut map and get jmol
    t = compid.split('_')
    comp = '_'.join([t[0], t[1], t[2]])
    mapout = comp + '_cut.map'
    maphtml = comp + '.html'

    mapscr = cut_map_bylimit(xyzlim)
    util.delete_file(mapout)
    arg = mapfile + ' ' + ' ' + mapout
    command = "chmod +x %s ; ./%s  " % (mapscr, mapscr) + arg
    os.system(command)

    min, max, mean, sigma = map_info(mapfile)
    min1, max1, mean1, sigma1 = map_info(mapout)
    cont = {
        '0.5': 0.5,
        '0.7': 0.7,
        '1.0': 1.0,
        '1.5': 1.5,
        '2.0': 2.0
    }  # contour levels for the map in the ASU
    cont1 = dict(cont)  # contour levels for the sub map (a copy, so the ASU levels are not altered)

    scale = 1.0
    if (float(sigma1) > 0):
        scale = float(sigma) / float(sigma1)
        for z in list(cont.keys()):
            cont1[z] = cont[z] * scale

    maphtml = get_html4jmol(comp, xyzcomp, mapout, cont1)

    return maphtml, mapout
Example #10
def build_installer_data(dir):
    src = os.path.join("mupdf", "fonts", "droid", "DroidSansFallback.ttf")
    dst = os.path.join(dir, "DroidSansFallback.ttf")
    if not os.path.exists(dst) or is_more_recent(src, dst):
        copy_to_dst_dir(src, dir)

    files = [["SumatraPDF-no-MuPDF.exe", "SumatraPDF.exe"],
             "DroidSansFallback.ttf", "libmupdf.dll", "npPdfViewer.dll",
             "PdfFilter.dll", "PdfPreview.dll", "uninstall.exe"]
    create_lzma_archive(dir, "InstallerData.dat", files)
    installer_res = os.path.join(dir, "sumatrapdf", "Installer.res")
    util.delete_file(installer_res)
Example #11
def md_to_html(src, dst):
	s = read_file_utf8(src)
	md_info = parse_md(s)
	body = markdown.markdown(md_info.s)
	tmpl = tmpl_for_src_path(src)
	#print("Found template: %s" % mdtmpl)
	title = md_info.meta_data["title"]
	#print(vars.keys())
	html = str(tmpl(title, body))
	util.delete_file(dst)
	print("wrote %s" % dst)
	write_file_utf8(dst, html)
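A hedged usage sketch for md_to_html: regenerating one HTML file per Markdown source in a folder. The directory layout and output naming are assumptions for illustration; only md_to_html itself comes from the snippet above:

import os

def regen_all(src_dir, dst_dir):
    '''Minimal sketch: convert every .md file in src_dir to .html in dst_dir.'''
    for name in os.listdir(src_dir):
        if name.endswith('.md'):
            src = os.path.join(src_dir, name)
            dst = os.path.join(dst_dir, name[:-3] + '.html')
            md_to_html(src, dst)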
Example #12
File: prog.py Project: cypreeti/dcc
def run_phenix(pdbfile, sffile, type1):
    '''run sub-programs of phenix
    '''
    outfile = 'phenix__%s.log' %type1
    util.delete_file(outfile)
    print ('\nDoing PHENIX calculation for (%s) ...' %type1)
    
    if type1 == 'xtriage' :
        arg="phenix.xtriage %s log=%s  >/dev/null" %(sffile, outfile)
        os.system(arg)
    
    return outfile
Example #13
File: regen.py Project: WMAT01/web-blog
def md_to_html(src, dst):
    s = read_file_utf8(src)
    md_info = parse_md(s)
    body = markdown.markdown(md_info.s)
    tmpl = tmpl_for_src_path(src)
    # print("Found template: %s" % mdtmpl)
    title = md_info.meta_data["title"]
    # print(vars.keys())
    html = str(tmpl(title, body))
    util.delete_file(dst)
    print("wrote %s" % dst)
    write_file_utf8(dst, html)
Example #14
def build_installer_data(dir):
  src = os.path.join("mupdf", "fonts", "droid", "DroidSansFallback.ttf")
  dst = os.path.join(dir, "DroidSansFallback.ttf")
  if not os.path.exists(dst) or is_more_recent(src, dst):
    copy_to_dst_dir(src, dir)

  files = [ ["SumatraPDF-no-MuPDF.exe", "SumatraPDF.exe"], "DroidSansFallback.ttf",
    "libmupdf.dll", "npPdfViewer.dll", "PdfFilter.dll", "PdfPreview.dll",
    "uninstall.exe"]
  create_lzma_archive(dir, "InstallerData.dat", files)
  installer_res = os.path.join(dir, "sumatrapdf", "Installer.res")
  util.delete_file(installer_res)
Example #15
File: tlswater.py Project: cypreeti/dcc
def compare_r_tls_water(pdbfile_inp, sffile):
    ''' compare the R factors with/without TLS correction for waters.
    Remove all the ANISOU and apply TLS again with/without water.
    '''

    pdbfile = rid_of_anisou(pdbfile_inp)

    pdbfile_new = find_tls_water(pdbfile)
    out2 = prog.run_dcc(pdbfile_new, sffile, '')

    util.delete_file(pdbfile)

    return
Example #16
def delete_question_image(question_id):
    try:
        user_id = data_manager.get_userid_by_username(session['username'])
    except KeyError:
        return redirect('/')
    if user_id == data_manager.get_foreign_key_by_id(
            data_manager.question_db, 'users_id', question_id)[0]['users_id']:
        filename = 'static/image_for_question' + str(question_id) + '.png'
        util.delete_file(filename)
        data_manager.update_image_data_by_id(data_manager.question_db,
                                             question_id, 'no image')
    else:
        flash('Invalid user')
    return redirect('/question/{}'.format(question_id))
Example #17
def build_installer_data(dir):
  src = os.path.join("mupdf", "resources", "fonts", "droid", "DroidSansFallback.ttf")
  if not os.path.exists(src):
    # location before https://code.google.com/p/sumatrapdf/source/detail?r=8266
    src = os.path.join("mupdf", "fonts", "droid", "DroidSansFallback.ttf")
  assert os.path.exists(src)
  dst = os.path.join(dir, "DroidSansFallback.ttf")
  if not os.path.exists(dst) or is_more_recent(src, dst):
    copy_to_dst_dir(src, dir)

  files = [ ["SumatraPDF-no-MuPDF.exe", "SumatraPDF.exe"], "DroidSansFallback.ttf",
    "libmupdf.dll", "npPdfViewer.dll", "PdfFilter.dll", "PdfPreview.dll",
    "uninstall.exe"]
  create_lzma_archive(dir, "InstallerData.dat", files)
  installer_res = os.path.join(dir, "sumatrapdf", "Installer.res")
  util.delete_file(installer_res)
Example #19
def sf_symmetry(sffile, pdbfile):
    ''' get the best space group by pointless.
    '''

    sfmtz = prog.sf_convertor(pdbfile, sffile, 'mtz', '')

    if not util.check_file(500, sfmtz):
        print("Error: MTZ file not generated, check symmetry/cell in sf file.")
        sys.exit()

    print('Getting the best space group by pointless...')
    out = prog.run_ccp4(pdbfile, sfmtz, 'pointless')

    print('For details, please see the output file =%s' % out)

    util.delete_file(sfmtz, 'sf_format_guess.text', 'sf_information.txt')
Example #20
File: prog.py Project: cypreeti/dcc
def run_ccp4(pdbfile, sffile, type1):
    '''run sub-programs of CCP4
    '''
    outfile = 'ccp4__%s.log' %type1
    util.delete_file(outfile)
    print ('\nDoing %s ...' %type1)
    
    if type1 == 'ctruncate' :
        arg='ctruncate -hklin %s -amplitudes -colin "/*/*/[FP,SIGFP]" -hklout ctruncate-SF.mtz>& %s' %(sffile,outfile)
#        arg='ctruncate -hklin %s -amplitudes -colano "/*/*/[I(+),SIGI(+),I(-),SIGI(-)]" -hklout ctruncate-SF.mtz>& %s' %(sffile,outfile)
        os.system(arg)
        
    elif type1 == 'pointless' :
        arg='pointless hklin %s > %s' %(sffile, outfile)
        os.system(arg)
        os.system('grep "Best Solution" %s ' %outfile)

    return outfile
Example #21
File: ligand.py Project: cypreeti/dcc
def get_subpdb(pdb, k, v, idd):
    '''pick the atoms in pdb (a list) for chain k in residue number v (a list)
    '''

    peppdb = '%s.pdb' % (idd)
    fwt = open(peppdb, 'w')

    natom = 0
    for x in pdb:
        if ('CRYST1' in x[:6] or 'SCALE' in x[:5]):
            fwt.write(x)
        elif (('ATOM' in x[:6] or 'HETATM' in x[:6]) and k == x[20:22].strip()
              and int(x[22:26]) in v):
            fwt.write(x)
            natom = natom + 1

    fwt.close()
    if natom < 2: util.delete_file(peppdb)  # ion
    return natom, peppdb
Example #22
def get_freer_flag(inp1, inp2):
    ''' find the correct free set in the MTZ or mmCIF file by exhaustive testing.
    '''

    idd, xyz_inp, sf_inp = get_file_type(inp1, inp2)

    if not xyz_inp or not sf_inp:
        print('Error: Stopped searching the free set.')
        print('       File must be in MTZ or an mmCIF file having _refln.pdbx_r_free_flag\n')
        sys.exit()

    flist = open(sf_inp, 'r').readlines()

    out = xyz_inp + '_test_Rf'
    fw = open(out, 'w')
    tmp = 'set  reso  R_rep  Rf_rep  CC_rep    R_cal  Rf_cal  CC_cal \n'
    print(tmp)
    fw.write(tmp)

    mark = ' '
    for i in range(20):
        if idd == 1:  #sf is mtz
            add = ' -freer %d' % i
            sf = prog.sf_convertor(xyz_inp, sf_inp, 'mmcif', add)
        else:
            sf = cif2cif_sf(flist, i)

        dcc_out = prog.run_dcc(xyz_inp, sf, ' -one -no_xtriage ')
        val, rep, notls, tls, bstat, prog_rep = parse.values_from_dcc(dcc_out)
        if (notls[3] - notls[2]) > 0.02: mark = '*'
        arg = ' %2d  %s %s\n' % (i, val, mark)
        print(arg)
        fw.write(arg)
        if mark == '*': break
        util.delete_file(sf)
    fw.close()

    print('The statistics output file is =%s' % out)
    if mark == '*':
        print('The matched SF file is =%s\n' % sf)
    else:
        print('Warning: No proper free set is found! \n')
Example #23
def delete_answer(answer_id=None):
    try:
        user_id = data_manager.get_userid_by_username(session['username'])
    except KeyError:
        return redirect('/')
    question_id = data_manager.get_foreign_key_by_id(
        data_manager.answer_db, 'question_id', answer_id)[0]['question_id']
    if user_id == data_manager.get_foreign_key_by_id(data_manager.answer_db,
                                                     'users_id',
                                                     answer_id)[0]['users_id']:
        data_manager.delete_line_by_foreign_id(data_manager.comment_db,
                                               'answer_id', answer_id)
        question_id = data_manager.delete_answer_by_id(
            answer_id)['question_id']
        filename = 'static/image_for_answer' + str(answer_id) + '.png'
        if util.check_file(filename):
            util.delete_file(filename)
    else:
        flash('Invalid user')
    return redirect('/question/{}'.format(question_id))
Example #24
def run_async(command, exec_time_in_seconds):
    if util.valid_ip():
        try:
            temp_out_file_name = tempfile.NamedTemporaryFile().name
            with open(temp_out_file_name, "w") as fout:
                proc = subprocess.Popen(command, shell=True, stdout=fout)
                # wait for a few seconds
                time.sleep(exec_time_in_seconds)
                # stop the process
                proc.kill()

            # read contents of the file
            with open(temp_out_file_name, "rb") as fin:
                output = fin.read()

            # remove the temp file
            util.delete_file(temp_out_file_name)
            return util.replace_all(output, util.get_replace_dic())
        except subprocess.SubprocessError:
            proc.kill()
            outs, errs = proc.communicate()
            return errs
    else:
        return message.error_404_msg
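run_async starts the command, sleeps for the allotted time and then kills it. The same budget can be enforced with communicate(timeout=...); a minimal sketch that omits the util.valid_ip() guard, the replace_all post-processing and the error branch of the original:

import subprocess

def run_for(command, exec_time_in_seconds):
    '''Minimal sketch: run a shell command, stop it after a time budget, return its stdout.'''
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, _ = proc.communicate(timeout=exec_time_in_seconds)
    except subprocess.TimeoutExpired:
        proc.kill()                  # budget exhausted: stop the process
        out, _ = proc.communicate()  # collect whatever it printed before being killed
    return out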
Example #25
File: main.py Project: ACM-CUBA/box
def build_one_output(name, input_fn):
    output_fn = change_extension(input_fn, 'sol')
    delete_file(output_fn)
    result = run_solution(reference_solution(name), input_fn, output_fn)
    return result.status == 'OK'
Example #26
def test_delete_file():
    util.delete_file('z.txt')
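The test above only checks that deleting a (probably absent) file does not raise. A slightly fuller pytest-style sketch, assuming util.delete_file is meant to remove an existing file and to tolerate a missing one, as the other examples on this page rely on (tmp_path is pytest's temporary-directory fixture):

import os

def test_delete_file_removes_and_ignores(tmp_path):
    target = tmp_path / 'z.txt'
    target.write_text('data')
    util.delete_file(str(target))            # an existing file should be removed
    assert not os.path.exists(str(target))
    util.delete_file(str(target))            # deleting it again should not raise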
Example #27
File: run.py Project: ACM-CUBA/box
def run_solution(sol_fn,
                 input_fn,
                 output_fn=None,
                 reference_fn=None,
                 time_limit=20,
                 mem_limit=1024,
                 file_limit=1024):
    if reference_fn is None:
        if output_fn is None:
            output_fn = '/dev/null'
    else:
        assert output_fn is None
        output_file = NamedTemporaryFile(delete=False)
        output_fn = output_file.name

    mem_limit = mem_limit * 1024 * 1024
    file_limit = file_limit * 1024
    pid = os.fork()
    if pid == 0:
        resource.setrlimit(resource.RLIMIT_CPU,
                           (time_limit + 1, time_limit + 1))
        resource.setrlimit(resource.RLIMIT_DATA, (mem_limit, mem_limit))
        resource.setrlimit(resource.RLIMIT_FSIZE, (file_limit, file_limit))
        with open(input_fn, 'r') as in_file:
            os.dup2(in_file.fileno(), 0)
        with open(output_fn, 'w') as out_file:
            os.dup2(out_file.fileno(), 1)
        with open('/dev/null', 'w') as err_file:
            os.dup2(err_file.fileno(), 2)
        os.execl(sol_fn, sol_fn)
    (pid, status, rusage) = os.wait4(pid, 0)
    # ranido changed
    #result = Result(running_time = rusage.ru_utime)
    result = Result(running_time=rusage.ru_utime + rusage.ru_stime)

    # ranido changed
    #if os.WIFSIGNALED(status) and os.WTERMSIG(status) == signal.SIGXCPU:
    if result.running_time > time_limit:
        result.status = 'TLE'
    elif os.WEXITSTATUS(status) != 0:
        result.status = 'RE'
        result.detail = 'failed'
        hints.give_hint('solution-failed')
        if os.WIFSIGNALED(status) and os.WTERMSIG(status) == signal.SIGSEGV:
            hints.give_hint('solution-SIGSEGV')
            result.detail = 'SIGSEGV'
        elif os.WIFSIGNALED(status) and os.WTERMSIG(status) == signal.SIGABRT:
            hints.give_hint('solution-SIGABRT')
            result.detail = 'SIGABRT'
    elif reference_fn:
        if subprocess.call(['diff', reference_fn, output_fn],
                           stdout=NamedTemporaryFile(),
                           stderr=NamedTemporaryFile()) == 0:
            result.status = 'AC'
        else:
            result.status = 'WA'
    else:
        result.status = 'OK'

    if reference_fn:
        delete_file(output_fn)
    return result
Example #28
def clean_backups(name):
    u"Delete backup files from problem `name`'s tree."
    for dirpath, _, backup_fns in os.walk('%s/' % name):
        for backup_fn in backup_fns:
            if backup_fn.endswith('~'):
                delete_file(os.path.join(dirpath, backup_fn))
Example #29
def set_file(path):
    util.delete_file(path)
    logging.basicConfig(format='%(levelname)s:%(message)s',
                        filename=path,
                        level=logging.DEBUG)
Example #30
 def drop(self):
     path = util.path_join("pages", self.name + ".json")
     util.delete_file(path)
     self.logger.info("delete page %s successfully" % self.name)
Example #31
for file in [
        "LICENSE", "Makefile", "pkginfo.in", "prototype.in", "icon.desktop",
        "start.sh", "vmoptions.README"
]:
    if not util.check_file_exists(os.path.join(util.get_script_path(), "data", args.ide, file)) and not \
            util.check_file_readable(os.path.join(util.get_script_path(), "data", args.ide, file)):
        logger.error("%s does not exist or is not readable." % file)
        cleanup(-1, logger)

# Download URL
if util.check_file_exists(
        os.path.join(util.get_script_path(), "tmp",
                     link.split("/")[-1])):
    if not util.delete_file(
            os.path.join(util.get_script_path(), "tmp",
                         link.split("/")[-1]), logger, False):
        cleanup(-1, logger)

resp = urllib.request.urlretrieve(
    link, os.path.join(util.get_script_path(), "tmp",
                       link.split("/")[-1]), util.progress_hook)
if resp is None or resp[1]["Connection"] != "close" or int(
        resp[1]["Content-Length"]) < 100000:
    logger.error("Error while downloading '%s'." %
                 os.path.join(util.get_script_path(), "tmp",
                              link.split("/")[-1]))
    cleanup(-1, logger)

if not util.run_cmd(
        "tar --strip-components 1 -C %s -zxf %s" %
Example #32
# Checking files
for file in ["control.in", "postinst", "sysctl-99.conf"]:
    if not util.check_file_exists(os.path.join(util.get_script_path(), "data", args.ide, "debian", file)) and not \
            util.check_file_readable(os.path.join(util.get_script_path(), "data", args.ide, "debian", file)):
        logger.error("%s does not exist or is not readable." % file)
        cleanup(-1, logger)

for file in ["LICENSE", "Makefile", "pkginfo.in", "prototype.in", "icon.desktop", "start.sh", "vmoptions.README"]:
    if not util.check_file_exists(os.path.join(util.get_script_path(), "data", args.ide, file)) and not \
            util.check_file_readable(os.path.join(util.get_script_path(), "data", args.ide, file)):
        logger.error("%s does not exist or is not readable." % file)
        cleanup(-1, logger)

# Download URL
if util.check_file_exists(os.path.join(util.get_script_path(), "tmp", link.split("/")[-1])):
    if not util.delete_file(os.path.join(util.get_script_path(), "tmp", link.split("/")[-1]), logger, False):
        cleanup(-1, logger)

resp = urllib.request.urlretrieve(link, os.path.join(util.get_script_path(), "tmp", link.split("/")[-1]), util.progress_hook)
if resp is None or resp[1]["Connection"] != "close" or int(resp[1]["Content-Length"]) < 100000:
    logger.error("Error while downloading '%s'." % os.path.join(util.get_script_path(), "tmp", link.split("/")[-1]))
    cleanup(-1, logger)

if not util.run_cmd("tar --strip-components 1 -C %s -zxf %s" %
                    (os.path.join(util.get_script_path(), "tmp", "root", "usr", "share", "jetbrains", args.ide),
                     os.path.join(util.get_script_path(), "tmp", link.split("/")[-1])), logger, False):
    logger.error("Error while unpacking '%s' to '%s'." %
                 (os.path.join(util.get_script_path(), "tmp", link.split("/")[-1]),
                  os.path.join(util.get_script_path(), "tmp", "root", "usr", "share", "jetbrains", args.ide)))
    cleanup(-1, logger)
Example #33
def sf_quality(sffile, pdbfile):
    '''run ctruncate and analyse the result
    '''

    sf_tmp = sffile + '.mtz'

    sf_tmp1 = prog.sf_convertor(pdbfile, sffile, 'mmcif', '')
    sf_tmp = prog.sf_convertor(pdbfile, 'SF_4_validate.cif', 'mtz', '')
    if not util.check_file(500, sf_tmp):
        print(
            "Error: Conversion failed! Maybe your sf file has no symmetry/cell."
        )
        sys.exit()

    if pdbfile == '-xtriage':
        outfile = prog.run_phenix(pdbfile, sf_tmp, 'xtriage')

    elif pdbfile == '-ctruncate':  #do ctruncate
        outfile = prog.run_ccp4(pdbfile, sf_tmp, 'ctruncate')

    else:  #do ctruncate
        outfile = prog.run_ccp4(pdbfile, sf_tmp, 'pointless')

    if not util.check_file(500, outfile):
        print('Error: The log file (%s) is not generated' % outfile)
        sys.exit()

    fp = open(outfile, 'r')
    flist = fp.readlines()
    for i, x in enumerate(flist):  #for xtriage/ctruncate
        t = x.strip()
        if ('<|L|>:' in t[:6] or '<L^2>:' in t[:8]
                or 'L-test suggests' in t[:30]
                or 'twinning, twin fraction' in t):
            print(t)
        elif (t == 'Acentric reflections' or t == 'Centric reflections'
              or t == 'L test for acentric data'):
            print(t)
        elif (('<I^2>/<I>^2' in x or '<F>^2/<F^2>' in x or ' <|E^2 - 1|> ' in x
               or 'Mean |L|   :' in x or 'Mean  L^2  :' in x
               or 'L statistic = ' in x) and 'untwinned' in x
              and ' perfect twin' in x):
            print(t)
        elif (t == '$TABLE: Acentric moments of E:'
              or t == '$TABLE: Centric moments of E:'):
            print(x.split(':')[1])
        elif '$GRAPHS: 4th moment of E (Expected ' in x or ': 1st & 3rd moments of E (Expected ' in x:
            print(x.split(':')[1])

        elif 'ML estimate of overall B value of None:' in x:
            print('The ML estimate of overall B value = %s' % flist[i + 1])
        elif '[MaxAnisoB-MinAnisoB]/[MaxAnisoB' in x:
            print('%s' % x)
        elif 'pseudo)merohedral twin laws' in x:
            print('%s' % x)

        elif 'No significant pseudotranslation is detected' in x:
            print('No significant pseudotranslation is detected.\n')

    fp.seek(0)
    law = ''
    for x in fp:  #for plots
        if (' Acentric_theory ' in x and '_perfect_twin ' in x):  #Xtriage
            fname, start, end = outfile + '_Ltest.data', '$$', '$$'
            extract_data4plot(fp, fname, start, end, 'Ltest')
            plot_items(fname, 'twin', '', '', '', '')

        elif ('H test for possible twin law' in x):  #Xtriage
            law = x.split('law')[1].strip()
        elif ('H ' in x[:5] and 'Observed_S(H)' in x):  #Xtriage
            fname, start, end = outfile + '_Htest.data', '$$', '$$'
            extract_data4plot(fp, fname, start, end, 'Htest')
            title = 'H test for Acentric data (twin law =%s)' % law
            plot = '''  using 1:2 t "Observed data",  '' u 1:3 t "Untwinned by theory" ,  '' u 1:4 t "Fitted" '''
            plot_items(fname, 'twin', title, '|H|', '', plot)

        elif ('1/resol^2   <I/sigma_I>;_Signal_to_noise   $$' in x):  #Xtriage
            fname, start, end = outfile + '_I_sigI.data', '$$', '$$'
            extract_data4plot(fp, fname, start, end, 'Wilson')
            plot_items(fname, 'I/sigI', '', '', '', '')

        if (' Expected_untwinned ' in x
                and 'Expected_twinned' in x):  #Ctruncate
            fname, start, end = outfile + '_Ltest.data', '$$', '$$'
            extract_data4plot(fp, fname, start, end, 'Ltest')
            plot_items(fname, 'twin', '', '', '', '')

        elif ('|L|       N(|L|)  Untwinned    Twinned' in x):  #Pointless
            fname, start, end = outfile + '_Ltest.data', '$$', '$$'
            extract_data4plot(fp, fname, start, end, 'Ltest')
            plot_items(fname, 'twin', '', '', '', '')

    fp.close()

    print("\nThe output file = %s\n" % outfile)

    util.delete_file('sf_information.txt', 'sf_format_guess.text', sf_tmp,
                     sf_tmp1)
Example #34
File: matt.py Project: cypreeti/dcc
def matt_coeff(infile, outfile):
    '''calculate the Matthews coefficient and solvent content; the input file is in PDB format.
    '''

    if not util.check_file(100, infile):
        print('Error: file (%s) does not exist' % infile)
        return

    file = infile
    if util.is_cif(infile): file = cif2pdb(infile)

    fp = open(file, 'r')

    cell = [1, 1, 1, 1, 1, 1]
    spt, nop, nmat, sym, res, atom, res = 1, 0, 1, 'X', [], [], []
    rmass, amass, armass = 0, 0, 0
    hetres, aname, rest = [], [], ''
    for x in fp:
        if 'REMARK 290 ' in x[:12] and '555' in x and ',' in x[23:32]:
            nop = nop + 1
        elif 'SEQRES' in x[:6]:
            t = x[17:79].split()
            res.extend(t)
#        elif 'SPLIT' in x[:6] :
#            t=x[6:].split()
#            spt=len(t)

        elif 'MTRIX3' in x[:6] and '1' not in x[55:].strip():
            nmat = nmat + 1
        elif 'CRYST1' in x[:6]:
            c = x[7:54].split()
            cell = [float(y) for y in c]
            sym = x[54:65].strip()

        elif ('ATOM' in x[:4] or 'HETA' in x[:4]):
            if ('HOH' in x[17:20] or 'DOD' in x[17:20]): continue

            atom.append(x)
            occ = float(x[54:60])
            amass = amass + atom_mass(x[76:78].strip()) * occ
            t = x[17:27]  #comp_ch_res_int
            aname.append(x[76:78].strip())

            if t != rest:
                comp = t[:3].strip()
                restmp = residue_mass(comp)
                if restmp < 1:
                    hetres.append(comp)
                else:
                    armass = armass + residue_mass(comp)

                rest = t
                aname = []

        elif 'ENDMDL' in x[:6]:
            break

    fp.close()

    cell_vol = cell_volume(cell)
    nsym = sg_nsym(sym)
    if nsym == -1:
        print('Error: space group (%s) is not in the list (%s)' % (sym, file))
        nsym = nop
#----------

    for x in hetres:
        armass = armass + non_standard_res(x, atom)

    for x in res:
        resm = residue_mass(x)
        if resm < 1:
            m1 = non_standard_res(x, atom)
            rmass = rmass + m1
        else:
            rmass = rmass + resm

    amatt, asolv = calc_matt(cell_vol, amass, nsym, nmat, spt)  #by atom, occ
    rmatt, rsolv = calc_matt(cell_vol, rmass, nsym, nmat, spt)  #by SEQRES
    armatt, arsolv = calc_matt(cell_vol, armass, nsym, nmat, spt)  #residue

    matt, solv = -1, -1

    if 2.0 < rmatt < 5:
        matt, solv = rmatt, rsolv
    elif 2.0 < armatt < 5:
        matt, solv = armatt, arsolv
    elif 2.0 < amatt < 5:
        matt, solv = amatt, asolv
    else:
        matt, solv = armatt, arsolv
        print(
            'Warning: packing problem (%s), Matthew_coeff=%.2f; Solvent=%.2f' %
            (file, matt, solv))

    if util.is_cif(infile): util.delete_file(file)

    print('%s : split nsym, nmat= %2d %2d %2d' % (file, spt, nsym, nmat))
    print('By ATOM:    matt= %6.2f , solvent= %6.2f ' % (amatt, asolv))
    print('By SEQRES:  matt= %6.2f , solvent= %6.2f ' % (rmatt, rsolv))
    print('By residue: matt= %6.2f , solvent= %6.2f ' % (armatt, arsolv))
    print('Possible:   matt= %6.2f , solvent= %6.2f ' % (matt, solv))

    print('\nmass_total_atom=%.1f ;  cell_vol=%.1f' % (amass, cell_vol))
    error = '?'
    if (matt > 8.7 or matt < 1.5):
        error = 'Warning: Matthew_coefficient(%.2f) is abnormal. Possible incomplete content of ASU (or a split entry).' % matt
    if matt == 0.0 and solv == 1.0: error = '?'  #space group problem

    if outfile:
        fw = open(outfile, 'w')
        ss = '''data_matt
#
_packing.Matthew_coefficient  %6.2f
_packing.solvent_content     %6.2f
_packing.error  "%s"\n
        ''' % (matt, solv, error)
        fw.write(ss)
        fw.close()
        print('The output file = %s\n' % outfile)
    return matt, solv
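calc_matt itself is not shown in this example. For orientation only: the Matthews coefficient is conventionally V_M = V_cell / (total mass in the unit cell) and the solvent content is 1 - 1.23 / V_M. A sketch under that convention; how nsym, nmat and spt enter the mass term is a guess from the call sites above, not the project's actual formula:

def calc_matt_sketch(cell_vol, mass, nsym, nmat, spt):
    '''Minimal sketch (assumed, not the project's calc_matt).'''
    total_mass = nsym * nmat * spt * mass   # assumed unit-cell mass in Daltons
    if total_mass <= 0:
        return 0.0, 1.0
    matt = cell_vol / total_mass            # Matthews coefficient, A^3/Da
    solvent = 1.0 - 1.23 / matt             # conventional solvent-content estimate
    return matt, solvent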
Example #35
def set_file(path):
    util.delete_file(path)
    logging.basicConfig(format='%(levelname)s:%(message)s',filename=path,level=logging.DEBUG)
Example #36
File: ligand.py Project: cypreeti/dcc
def get_subcif(dic, k, v):
    '''k is chainID;  v is list of residue number
    return idd, natom, ciffile name
    '''
    '''
    ciffile=dic['pdbfile']
    
    flist=open(ciffile, 'r').readlines()

    cell_items,values = cif.cifparse(flist, '_cell.')
    cell=cif.get_rows(cell_items, values)

    sym_items,values = cif.cifparse(flist, '_symmetry.')
    sym=cif.get_rows(sym_items, values)

    
    items,values = cif.cifparse(flist, '_atom_site.')
    comp=cif.parse_values(items,values,"_atom_site.auth_comp_id")
    asym=cif.parse_values(items,values,"_atom_site.auth_asym_id")
    seq=cif.parse_values(items,values,"_atom_site.auth_seq_id")
    alt=cif.parse_values(items,values,"_atom_site.label_alt_id")
    ins=cif.parse_values(items,values,"_atom_site.pdbx_PDB_ins_code");
    mod=cif.parse_values(items,values,"_atom_site.pdbx_PDB_model_num") 
    row=cif.get_rows(items, values)
  
    '''
    cell_items, cell = dic['cell_items'], dic['lig_cell']
    sym_items, sym = dic['sym_items'], dic['lig_sym']
    items, comp, asym, seq, alt, ins, mod, row = dic['items'], dic[
        'comp1'], dic['asym'], dic['seq'], dic['alt'], dic['ins'], dic[
            'mod'], dic['row']

    idd, natom, atom = '1_X_X_X__', 0, []

    for i in range(len(asym)):
        if asym and asym[i] == k and seq and int(seq[i]) in v:
            natom = natom + 1
            #print(i, natom, k, v[0],asym[i], seq[i], comp[i])
            alter, inst = '', ''
            if alt and alt[i] != '?' and alt[i] != '.': alter = alt[i]
            if ins and ins[i] != '?' and ins[i] != '.': inst = ins[i]
            atom.append(row[i])
            if natom == 1 and mod and comp:

                idd = '_'.join([mod[i], asym[i], comp[i], seq[i], inst, alter])

    subpep = '%s.cif' % (idd)  #write subcif name
    fw = open(subpep, 'w')

    fw.write('data_%s\n#\n' % idd)
    for i, p in enumerate(cell_items):
        if ' ' in cell[0][i]:
            fw.write("%s   '%s'\n" % (p, cell[0][i]))
        else:
            fw.write("%s   %s\n" % (p, cell[0][i]))

    fw.write('#\n')
    for i, p in enumerate(sym_items):
        if ' ' in sym[0][i]:
            fw.write("%s   '%s'\n" % (p, sym[0][i]))
        else:
            fw.write("%s   %s\n" % (p, sym[0][i]))

    fw.write('\n#\nloop_\n')
    for p in items:
        fw.write("%s\n" % p)
    for x in atom:
        for m in x:
            fw.write("%s " % m)
        fw.write('\n')

    fw.close()
    if natom < 2: util.delete_file(subpep)  # ion
    return idd, natom, subpep