Example no. 1
    def editModelFile(self):
        # copy imodel.py of the selected version to the sandbox
        e = self.models.selEndpoint()
        v = self.models.selVersion()
        vdir = wkd + '/' + e + '/version%0.4d/' % int(v)
        zdir = wkd + '/' + e + '/version0000/'

        if (vdir != zdir):
            try:
                shutil.copy(
                    vdir + 'imodel.py', zdir
                )  # copy imodel.py to the sandbox. This will be the base version for build
            except:
                tkMessageBox.showerror("Error Message",
                                       "Unable to access source imodel.py")
                return

        removefile(
            zdir + 'info.pkl'
        )  # remove info.pkl from the sandbox to force model rebuilding

            self.skipUpdate = True
            self.models.chargeData()
            self.model.setFocus(e, v)

        # launch idle with the imodel.py of the sandbox
        try:
            subprocess.Popen(['/usr/bin/idle', zdir + 'imodel.py'])
        except:
            tkMessageBox.showerror("Error Message", "Unable to edit imodel.py")
            pass

        self.buildModel.delete(0, END)
        self.buildModel.insert(0, '<edited model (save first)>')
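
# The removefile() helper used throughout these examples is never defined in
# the corpus itself. A minimal sketch, assuming it is simply expected to delete
# the given path and silently ignore a file that is already gone:
import os

def removefile(path):
    """Delete 'path' if it exists; do nothing when it is already missing."""
    try:
        os.remove(path)
    except OSError:
        pass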
Example no. 2
def get_svg():
    import time
    #time.sleep(10)
    paths = json.loads(request.args["paths"][0], object_hook=connections.Link.object_hook)
    handle1 = request.args["handle1"][0]
    handle2 = request.args["handle2"][0]
    lines = connections.generate_graph(dbdata, paths, handle1, handle2)
    lines = [line+"\n" for line in lines]
    #print(lines)
    use_tempfiles = True
    if use_tempfiles:
        utils.removefile("temp.dot")
        utils.removefile("temp.png")
        open("temp.dot","w").writelines(lines)
        # -Goverlap=true -Gsplines=true
        p = subprocess.Popen("dot  -Gcenter=true -T svg temp.dot -o temp.svg", shell=True)
        p.wait()
        #stdout_data = open("temp.svg","r", encoding="iso8859-1").read()
        stdout_data = open("temp.svg","r").read()
        i = stdout_data.find("<svg")
        stdout_data = stdout_data[i:].encode("utf-8")
        #utils.removefile("temp.dot")
        #utils.removefile("temp.png")
    else:
        p = subprocess.Popen("dot -Goverlap=scale -T svg", shell=True, 
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        (stdout_data,stderr_data) = p.communicate("\n".join(lines).encode("utf-8"))
    return Response(stdout_data, "image/svg+xml")
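
# The temp-file branch above can also be expressed without intermediate files;
# a minimal sketch using subprocess.run (Python 3.5+), assuming the same list
# of newline-terminated dot-source lines as in get_svg():
import subprocess

def render_svg(lines):
    """Pipe the dot source through Graphviz and return the SVG payload as bytes."""
    proc = subprocess.run(
        ["dot", "-Gcenter=true", "-Tsvg"],
        input="".join(lines).encode("utf-8"),
        stdout=subprocess.PIPE,
        check=True,
    )
    svg = proc.stdout
    return svg[svg.find(b"<svg"):]   # strip the XML prologue, as above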
Example no. 3
    def predict(self):        
        name    = self.seeds[0]
        version = self.seeds[1]
        series  = self.series

        removefile ('/var/tmp/results.txt')
        
        mycommand=[wkd+'/predict.py','-e',name,'-v',version,'-f', series, '-g']
            
        try:
            proc = subprocess.Popen(mycommand,stdout=subprocess.PIPE)
            self.q.put ('PROCESS '+str(proc.pid))
            
        except:
            self.q.put ('ERROR: Predict process failed')
            return
 
        for line in iter(proc.stdout.readline,''):
            
            line = line.rstrip()
            if line.startswith('ERROR:'):
                self.q.put (line)
                return

        if proc.wait() == 1 :
            self.q.put ('ERROR: Unknown error')
            return
            
        self.q.put('Predict completed OK '+ name + ' ' + version + ' ' + series)
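
# predict() above only pushes status strings onto self.q; the consuming side is
# not shown. A hypothetical sketch of a polling consumer, assuming a Tkinter
# root window, a queue.Queue named q, and a Text widget for the log:
import queue

def poll_queue(root, q, log_widget):
    """Drain pending worker messages and display them, then reschedule."""
    try:
        while True:
            msg = q.get_nowait()               # raises queue.Empty when drained
            log_widget.insert('end', msg + '\n')
    except queue.Empty:
        pass
    root.after(500, poll_queue, root, q, log_widget)  # poll again in 500 ms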
Example no. 4
def test2(testconf):
    """测试上传下载文件

    异常情况:下载文件后,往文件后追加内容,这时两个文件的 md5 肯定会
    不一样。

    """
    sgw_addr = testconf.sgw_addr
    sgw_port = testconf.sgw_port
    old_name = "test2.1"
    new_name = "test2.2"

    utils.removefile(old_name)
    utils.removefile(new_name)
    test1_stage0(old_name)
    test1_stage1(sgw_addr, sgw_port, old_name)
    test1_stage2(sgw_addr, sgw_port, old_name, new_name)

    rc = test1_stage3(old_name, new_name)
    if rc:
        utils.xor(new_name, 0)
        rc = test1_stage3(old_name, new_name)
        if rc:
            # the content of new_name has been changed, so the MD5 of the downloaded file and the source file should no longer match
            return False
        else:
            utils.xor(new_name, 0)
            rc = test1_stage3(old_name, new_name)
            return rc
    else:
        return False
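
# utils.xor() is not shown. A hypothetical sketch consistent with the test
# calling it twice (first to corrupt new_name, then to restore it): XOR the
# byte at the given offset in place.
def xor(filename, offset, mask=0xFF):
    """Flip the byte at 'offset' with an XOR mask; applying it twice restores the file."""
    with open(filename, "r+b") as f:
        f.seek(offset)
        original = f.read(1)
        f.seek(offset)
        f.write(bytes([original[0] ^ mask]))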
Example no. 5
    def editModelFile(self):
        # copy imodel.py of the selected version to the sandbox
        e = self.models.selEndpoint()
        v = self.models.selVersion()
        vdir = wkd + '/' + e + '/version%0.4d/'%int(v)
        zdir = wkd + '/' + e + '/version0000/'

        if (vdir!=zdir):
            try:
                shutil.copy(vdir+'imodel.py', zdir)   # copy imodel.py to the sandbox. This will be the base version for build 
            except:
                tkMessageBox.showerror("Error Message", "Unable to access source imodel.py")
                return

            removefile (zdir+'info.pkl')  # remove info.pkl from the sandbox to force model rebuilding
            
            self.skipUpdate=True
            self.models.chargeData()
            self.model.setFocus(e,v)

        # launch idle with the imodel.py of the sandbox
        try:
            subprocess.Popen(['/usr/bin/idle',zdir+'imodel.py'])
        except:
            tkMessageBox.showerror("Error Message", "Unable to edit imodel.py")
            pass
            
        self.buildModel.delete(0, END)
        self.buildModel.insert(0, '<edited model (save first)>')
Example no. 6
    def predict(self):
        name = self.seeds[0]
        version = self.seeds[1]
        series = self.series

        removefile('/var/tmp/results.txt')

        mycommand = [
            wkd + '/predict.py', '-e', name, '-v', version, '-f', series, '-g'
        ]

        try:
            proc = subprocess.Popen(mycommand, stdout=subprocess.PIPE)
            self.q.put('PROCESS ' + str(proc.pid))

        except:
            self.q.put('ERROR: Predict process failed')
            return

        for line in iter(proc.stdout.readline, ''):

            line = line.rstrip()
            if line.startswith('ERROR:'):
                self.q.put(line)
                return

        if proc.wait() == 1:
            self.q.put('ERROR: Unknown error')
            return

        self.q.put('Predict completed OK ' + name + ' ' + version + ' ' +
                   series)
Example no. 7
def Run(vars, log):
    """
    Rebuilds the system initrd, on first install or in case the
    hardware changed.
    """

    log.write("\n\nStep: Rebuilding initrd\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")

    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    # mkinitrd needs /dev and /proc to do the right thing.
    # /proc is already mounted, so bind-mount /dev here
    # xxx tmp - trying to work around the f14 case:
    # check that /dev/ is mounted with devtmpfs
    # tmp - sysexec_noerr not returning what one would expect
    # if utils.sysexec_noerr ("grep devtmpfs /proc/mounts") != 0:
    utils.sysexec_noerr("mount -t devtmpfs none /dev")
    utils.sysexec("mount -o bind /dev {}/dev".format(SYSIMG_PATH))
    utils.sysexec("mount -t sysfs none {}/sys".format(SYSIMG_PATH))

    initrd, kernel_version = systeminfo.getKernelVersion(vars, log)
    try:
        utils.removefile("{}/boot/{}".format(SYSIMG_PATH, initrd))
    except:
        log.write("{}/boot/{} is already removed\n".format(
            SYSIMG_PATH, initrd))

    # hack for CentOS 5.3
    bypassRaidIfNeeded(SYSIMG_PATH, log)
    # specify ext3 for fedora14 and above as their default fs is ext4
    utils.sysexec_chroot(
        SYSIMG_PATH,
        "mkinitrd -v --with=ext3 --allow-missing /boot/initrd-{}.img {}".
        format(kernel_version, kernel_version), log)

    utils.sysexec_noerr("umount {}/sys".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/dev".format(SYSIMG_PATH), log)
Example no. 8
def test5(testconf):
    """下载大小为零的文件"""
    filename = "test5.1"
    utils.removefile(filename)
    test1_stage0(filename, filesize=0)
    test1_stage1(testconf.sgw_addr, testconf.sgw_port, filename)
    utils.removefile(filename)
    rc = test3_download_file(testconf.sgw_addr, testconf.sgw_port, filename)
    if rc:
        filesize = os.path.getsize(filename)
        return filesize == 0
    else:
        return False
Example no. 9
    def predict (self, molFile, molName, molCharge, detail, clean=True):

        # default return values
        molPR=molCI=molAD=(False,0.0)

##        success, molMD = self.computeLogP (molFile)

        success, molMD = computeLogP (molFile)
        success, molCharge = self.computeCharge (molFile)
        
        if not success: return (molPR,molAD,molCI)

        success, pr  = self.computePrediction (molMD,molCharge)
        molPR = (success, pr)
        if not success: return (molPR,molAD,molCI)

        if clean: removefile (molFile)
            
        return (molPR,molAD,molCI)
Example no. 10
    def predict(self, molFile, molName, molCharge, detail, clean=True):

        # default return values
        molPR = molCI = molAD = (False, 0.0)

        ##        success, molMD = self.computeLogP (molFile)

        success, molMD = computeLogP(molFile)
        success, molCharge = self.computeCharge(molFile)

        if not success: return (molPR, molAD, molCI)

        success, pr = self.computePrediction(molMD, molCharge)
        molPR = (success, pr)
        if not success: return (molPR, molAD, molCI)

        if clean: removefile(molFile)

        return (molPR, molAD, molCI)
Example no. 11
def test1(testconf):
    """测试上传下载文件

    0. 生成测试文件
    1. 上传文件
    2. 下载文件
    3. 比较文件

    """
    sgw_addr = testconf.sgw_addr
    sgw_port = testconf.sgw_port
    old_name = "test1.1"
    new_name = "test1.2"

    utils.removefile(old_name)
    utils.removefile(new_name)
    test1_stage0(old_name)
    test1_stage1(sgw_addr, sgw_port, old_name)
    test1_stage2(sgw_addr, sgw_port, old_name, new_name)
    return test1_stage3(old_name, new_name)
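
# The test1_stage* helpers are not included in the corpus. A hypothetical
# sketch of the comparison step (stage 3), assuming it succeeds when the
# uploaded and downloaded files have identical MD5 digests:
import hashlib

def test1_stage3(old_name, new_name):
    """Return True when both files have the same MD5 digest."""
    def md5(path):
        digest = hashlib.md5()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                digest.update(chunk)
        return digest.hexdigest()
    return md5(old_name) == md5(new_name)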
Example no. 12
 def export_file(self,
                 source,
                 target,
                 schema,
                 pbar,
                 group=False,
                 feature=None,
                 dicolist=None,
                 diconame=None):
     if self.overwrite:
         removefile(target)
     with fiona.open(path=target,
                     mode='w',
                     schema=schema,
                     driver=self.dest_format,
                     crs=self.dest_epsg,
                     encoding='UTF-8') as output:
         if group:
             source = self.read()
         for ft in source:
             if group:
                 if ft['properties'][diconame] not in dicolist:
                     if ft['properties'][diconame] == feature['properties'][
                             diconame]:
                         tf = self.transform_feature(
                             feature=ft,
                             orig_proj=self.src_proj,
                             dest_proj=self.dest_proj)
                         output.write(tf)
                         pbar.update()
             else:
                 tf = self.transform_feature(feature=ft,
                                             orig_proj=self.src_proj,
                                             dest_proj=self.dest_proj)
                 output.write(tf)
                 pbar.update()
         if group:
             source.close()
Example no. 13
def test3(testconf):
    """测试按偏移下载文件

    0. 生成测试文件,大小不为零
    1. 上传文件
    2. 使用偏移下载文件的接口进行下载
    3. 比较生成的文件和下载回来的文件
    """
    sgw_addr = testconf.sgw_addr
    sgw_port = testconf.sgw_port
    old_name = "test3.1"
    new_name = "test3.2"

    utils.removefile(old_name)
    utils.removefile(new_name)
    test1_stage0(old_name)
    test1_stage1(sgw_addr, sgw_port, old_name)

    shutil.move(old_name, new_name)
    rc = test3_download_file(sgw_addr, sgw_port, old_name)
    if rc:
        return test1_stage3(old_name, new_name)
    else:
        return False
Example no. 14
def test4(testconf):
    """上传大小为零的文件"""
    filename = "test4.1"
    utils.removefile(filename)
    test1_stage0(filename, filesize=0)
    return test1_stage1(testconf.sgw_addr, testconf.sgw_port, filename)
Example no. 15
    def buildWorkflow(self, molecules):

        if not self.buildable:
            success, result = self.log()
            if not success:
                return (False, result)
            return (result)

        if not molecules:
            molecules = self.vpath + '/training.sdf'

        # if this is a submodel
        if self.vpath[-9:-4] == 'local':
            return (model.buildWorkflow(self, molecules))

        # if this is the top model
        BASEDIR = '/home/modeler/soft/eTOXlab/src/'
        result = ''

        nchunks = self.nchunks  # just to save typing

        itag = self.vpath.split('/')[-2]

        # split original SDFile and create 'nchunks' files called 'piece0000.sdf'...
        success, results = self.splitSet(molecules, nchunks)
        if not success:
            return (False, result)

        # prepare structures to collect information about the consolidated results
        exper = np.zeros(results, dtype=np.float64)
        recalc = []
        predic = []
        ulimit = []

        imol = np.zeros(nchunks, dtype=np.int32)
        ir2 = np.zeros(nchunks, dtype=np.float64)
        iq2 = np.zeros(nchunks, dtype=np.float64)
        isdep = np.zeros(nchunks, dtype=np.float64)

        for i in range(self.modelLV):
            recalc.append(np.zeros(results, dtype=np.float64))
            predic.append(np.zeros(results, dtype=np.float64))

        rcount = 0
        pcount = 0

        fp = open(self.vpath + '/pls-predicted.txt', 'w')
        fr = open(self.vpath + '/pls-recalculated.txt', 'w')
        header = 'Name Yexp '
        for i in range(self.modelLV):
            header += 'Y-LV%d ' % (i + 1)
        header += '\n'
        fp.write(header)
        fr.write(header)

        # make 'nchunks' calls to the 'build' command using the respective pieces
        for ichunk in range(nchunks):

            #print 'LOCAL MODEL %d' % ichunk

            ndir = self.vpath + '/local%0.4d' % ichunk

            if os.path.isdir(ndir):
                shutil.rmtree(ndir, ignore_errors=True)

            os.mkdir(ndir)

            call = [
                '/usr/bin/python', BASEDIR + 'build.py', '-e', itag, '-f',
                self.vpath + '/piece%0.4d.sdf' % ichunk, '-m',
                self.vpath + '/imodel.py', '-s',
                str(ichunk)
            ]

            retcode = subprocess.call(call)

            if retcode != 0: return (False, 'error in computation')

            # collect information about the model
            if os.path.isfile(ndir + '/info.pkl'):

                modelInfo = open(ndir + '/info.pkl', 'rb')
                infoID = pickle.load(modelInfo)
                infoSeries = pickle.load(modelInfo)
                infoMD = pickle.load(modelInfo)
                infoModel = pickle.load(modelInfo)
                infoResult = pickle.load(modelInfo)
                modelInfo.close()

                for i in infoResult:
                    if 'nobj' == i[0]: imol[ichunk] = int(i[1])
                    elif 'R2' == i[0]: ir2[ichunk] = float(i[1])
                    elif 'Q2' == i[0]: iq2[ichunk] = float(i[1])
                    elif 'SDEP' == i[0]: isdep[ichunk] = float(i[1])

            # collect and accumulate recalculated and predicted values
            f = open(ndir + '/pls-recalculated.txt', 'r')
            header = True
            for line in f:
                if header:
                    header = False
                    continue
                fr.write(line)
                exper[rcount] = float(line.split()[1])
                for i in range(self.modelLV):
                    recalc[i][rcount] = float(line.split()[i + 2])
                rcount += 1
            f.close()

            f = open(ndir + '/pls-predicted.txt', 'r')
            header = True
            for line in f:
                if header:
                    header = False
                    continue

                fp.write(line)
                for i in range(self.modelLV):
                    predic[i][pcount] = float(line.split()[i + 2])
                pcount += 1
            f.close()

            ulimit.append(pcount)

        fp.close()
        fr.close()

        SSYp = np.sum(np.square(predic[-1] - exper))
        SSYr = np.sum(np.square(recalc[-1] - exper))

        emean = np.mean(exper[:pcount])
        SSY0 = 0.00
        for i in range(pcount):
            SSY0 += np.square(exper[i] - emean)

        R2 = 1.00 - (SSYr / SSY0)
        Q2 = 1.00 - (SSYp / SSY0)
        SDEP = np.sqrt(SSYp / pcount)

        #print 'R2:%5.3f' %R2, 'Q2:%5.3f' %Q2, 'SDEP:%5.3f' %SDEP

        # add information to infoResult and infoNotes
        self.infoResult = []
        self.infoResult.append(('nobj', pcount))
        self.infoResult.append(('R2', '%5.3f' % R2))
        self.infoResult.append(('Q2', '%5.3f' % Q2))
        self.infoResult.append(('SDEP', '%5.3f' % SDEP))

        self.infoNotes = []
        self.infoNotes.append(('local', '%d chunks' % nchunks))
        for i in range(nchunks):
            lab = '[%d] ' % (i + 1)
            self.infoNotes.append((lab + 'nobj', '%d ' % imol[i]))
            self.infoNotes.append((lab + 'R2', '%5.3f ' % ir2[i]))
            self.infoNotes.append((lab + 'Q2', '%5.3f ' % iq2[i]))
            self.infoNotes.append((lab + 'SDEP', '%5.3f ' % isdep[i]))

        # remove existing PNG graphics
        pngfiles = glob.glob(self.vpath + '/pls-*.png')
        for i in pngfiles:
            removefile(i)

        # generate rec vs experimental and pred vs experimental for all model dimensions
        for i in range(self.modelLV):
            nvar = str(i + 1)

            # for predicted...
            fig1 = plt.figure()
            plt.xlabel('experimental y')
            plt.ylabel('predicted LV' + nvar)
            plt.title('Predicted')

            a = 0
            for j in range(nchunks):
                plt.scatter(exper[a:ulimit[j]],
                            predic[i][a:ulimit[j]],
                            c=cm.hsv(float(j) / float(nchunks)),
                            marker='o',
                            s=30,
                            label='chunk %d' % j)
                a = ulimit[j]
            plt.legend(loc='upper left', scatterpoints=1, fontsize=10)
            fig1.savefig(self.vpath + "/pls-predicted-LV" + nvar + ".png",
                         format='png')

            # for recalculated...
            fig2 = plt.figure()
            plt.xlabel('experimental y')
            plt.ylabel('recalculated LV' + nvar)
            plt.title('Recalculated')

            a = 0
            for j in range(nchunks):
                plt.scatter(exper[a:ulimit[j]],
                            recalc[i][a:ulimit[j]],
                            c=cm.hsv(float(j) / float(nchunks)),
                            marker='o',
                            s=30,
                            label='chunk %d' % j)
                a = ulimit[j]
            plt.legend(loc='upper left', scatterpoints=1, fontsize=10)
            fig2.savefig(self.vpath + "/pls-recalculated-LV" + nvar + ".png",
                         format='png')

        shutil.copy(self.vpath + "/pls-predicted-LV%d.png" % self.modelLV,
                    self.vpath + '/predicted.png')
        shutil.copy(self.vpath + "/pls-recalculated-LV%d.png" % self.modelLV,
                    self.vpath + '/recalculated.png')

        # save log
        success, result = self.log()
        if not success:
            return (False, result)

        return (result)
Example no. 16
def Run(vars, upgrade, log):
    """
    Download core + extensions bootstrapfs tarballs and install on the hard drive

    The upgrade boolean is True when we are upgrading a node root install while
    preserving its slice contents; in that case we just perform extra cleanup
    before unwrapping the bootstrapfs, because the running system may have
    extraneous files, that is, files that are *not* present in the bootstrapfs
    and that can impact/clobber the resulting upgrade.
    
    Expect the following variables from the store:
    SYSIMG_PATH          the path where the system image will be mounted
    PARTITIONS           dictionary of generic part. types (root/swap)
                         and their associated devices.
    NODE_ID              the id of this machine
    
    Sets the following variables:
    TEMP_BOOTCD_PATH     where the boot cd is remounted in the temp
                         path
    ROOT_MOUNTED         set to 1 when the base logical volumes
                         are mounted.
    """

    log.write("\n\nStep: Install: bootstrapfs tarball (upgrade={}).\n".format(
        upgrade))

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")

        NODE_ID = vars["NODE_ID"]
        if NODE_ID == "":
            raise ValueError("NODE_ID")

        VERSION = vars['VERSION'] or 'unknown'

    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    try:
        # make sure the required partitions exist
        val = PARTITIONS["root"]
        val = PARTITIONS["swap"]
        val = PARTITIONS["vservers"]
    except KeyError as part:
        log.write("Missing partition in PARTITIONS: {}\n".format(part))
        return 0

    bs_request = BootServerRequest.BootServerRequest(vars)

    # in upgrade mode, since we skip InstallPartitionDisks
    # we need to run this
    if upgrade:
        log.write("Upgrade mode init : Scanning for devices\n")
        systeminfo.get_block_devices_dict(vars, log)
        utils.sysexec_noerr("vgscan --mknodes", log)
        utils.sysexec_noerr("vgchange -ay", log)

    # debugging info - show in either mode
    utils.display_disks_status(PARTITIONS, "In InstallBootstrapFS", log)

    utils.breakpoint("we need to make /dev/mapper/* appear")

    log.write("turning on swap space\n")
    utils.sysexec("swapon {}".format(PARTITIONS["swap"]), log)

    # make sure the sysimg dir is present
    utils.makedirs(SYSIMG_PATH)

    log.write("mounting root file system\n")
    utils.sysexec(
        "mount -t ext3 {} {}".format(PARTITIONS["root"], SYSIMG_PATH), log)

    fstype = 'ext3' if vars['virt'] == 'vs' else 'btrfs'

    one_partition = vars['ONE_PARTITION'] == '1'

    if (not one_partition):
        log.write("mounting vserver partition in root file system (type {})\n".
                  format(fstype))
        utils.makedirs(SYSIMG_PATH + "/vservers")
        utils.sysexec("mount -t {} {} {}/vservers"\
                      .format(fstype, PARTITIONS["vservers"], SYSIMG_PATH), log)

        if vars['virt'] == 'lxc':
            # NOTE: btrfs quota is supported from version: >= btrfs-progs-0.20 (f18+)
            #       older versions will not recognize the 'quota' command.
            log.write(
                "Enabling btrfs quota on {}/vservers\n".format(SYSIMG_PATH))
            utils.sysexec_noerr(
                "btrfs quota enable {}/vservers".format(SYSIMG_PATH))

    vars['ROOT_MOUNTED'] = 1

    # this is now retrieved in GetAndUpdateNodeDetails
    nodefamily = vars['nodefamily']
    extensions = vars['extensions']

    # in upgrade mode: we need to cleanup the disk to make
    # it safe to just untar the new bootstrapfs tarball again
    # on top of the hard drive
    if upgrade:
        CleanupSysimgBeforeUpgrade(SYSIMG_PATH, nodefamily, log)

    # the 'plain' option is for tests mostly
    plain = vars['plain']
    if plain:
        download_suffix = ".tar"
        uncompress_option = ""
        log.write("Using plain bootstrapfs images\n")
    else:
        download_suffix = ".tar.bz2"
        uncompress_option = "-j"
        log.write("Using compressed bootstrapfs images\n")

    log.write("Using nodefamily={}\n".format(nodefamily))
    if not extensions:
        log.write("Installing only core software\n")
    else:
        log.write("Requested extensions {}\n".format(extensions))

    bootstrapfs_names = [nodefamily] + extensions

    for name in bootstrapfs_names:
        tarball = "bootstrapfs-{}{}".format(name, download_suffix)
        source_file = "/boot/{}".format(tarball)
        dest_file = "{}/{}".format(SYSIMG_PATH, tarball)

        source_hash_file = "/boot/{}.sha1sum".format(tarball)
        dest_hash_file = "{}/{}.sha1sum".format(SYSIMG_PATH, tarball)

        time_beg = time.time()
        log.write("downloading {}\n".format(source_file))
        # 30 is the connect timeout, 14400 is the max transfer time in
        # seconds (4 hours)
        result = bs_request.DownloadFile(source_file, None, None, 1, 1,
                                         dest_file, 30, 14400)
        time_end = time.time()
        duration = int(time_end - time_beg)
        log.write("Done downloading ({} seconds)\n".format(duration))
        if result:
            # Download SHA1 checksum file
            log.write("downloading sha1sum for {}\n".format(source_file))
            result = bs_request.DownloadFile(source_hash_file, None, None, 1,
                                             1, dest_hash_file, 30, 14400)

            log.write("verifying sha1sum for {}\n".format(source_file))
            if not utils.check_file_hash(dest_file, dest_hash_file):
                raise BootManagerException(
                    "FATAL: SHA1 checksum does not match between {} and {}"\
                    .format(source_file, source_hash_file))

            time_beg = time.time()
            log.write("extracting {} in {}\n".format(dest_file, SYSIMG_PATH))
            result = utils.sysexec(
                "tar -C {} -xpf {} {}".format(SYSIMG_PATH, dest_file,
                                              uncompress_option), log)
            time_end = time.time()
            duration = int(time_end - time_beg)
            log.write("Done extracting ({} seconds)\n".format(duration))
            utils.removefile(dest_file)
        else:
            # the main tarball is required
            if name == nodefamily:
                raise BootManagerException(
                    "FATAL: Unable to download main tarball {} from server."\
                    .format(source_file))
            # for extensions, just issue a warning
            else:
                log.write(
                    "WARNING: tarball for extension {} not found\n".format(
                        name))

    # copy resolv.conf from the base system into our temp dir
    # so DNS lookups work correctly while we are chrooted
    log.write("Copying resolv.conf to temp dir\n")
    utils.sysexec("cp /etc/resolv.conf {}/etc/".format(SYSIMG_PATH), log)

    # Copy the boot server certificate(s) and GPG public key to
    # /usr/boot in the temp dir.
    log.write("Copying boot server certificates and public key\n")

    if os.path.exists("/usr/boot"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/boot"):
            utils.makedirs(SYSIMG_PATH + "/usr")
            shutil.copytree("/usr/boot", SYSIMG_PATH + "/usr/boot")
    elif os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/bootme"):
            utils.makedirs(SYSIMG_PATH + "/usr/boot")
            boot_server = open("/usr/bootme/BOOTSERVER").readline().strip()
            shutil.copy("/usr/bootme/cacert/" + boot_server + "/cacert.pem",
                        SYSIMG_PATH + "/usr/boot/cacert.pem")
            open(SYSIMG_PATH + "/usr/boot/boot_server", "w").write(boot_server)
            shutil.copy("/usr/bootme/pubring.gpg",
                        SYSIMG_PATH + "/usr/boot/pubring.gpg")

    # For backward compatibility
    if os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/mnt/cdrom/bootme"):
            utils.makedirs(SYSIMG_PATH + "/mnt/cdrom")
            shutil.copytree("/usr/bootme", SYSIMG_PATH + "/mnt/cdrom/bootme")

    # ONE_PARTITION => new distribution type
    if (vars['ONE_PARTITION'] != '1'):
        # Import the GPG key into the RPM database so that RPMS can be verified
        utils.makedirs(SYSIMG_PATH + "/etc/pki/rpm-gpg")
        utils.sysexec(
            "gpg --homedir=/root --export --armor"
            " --no-default-keyring --keyring {}/usr/boot/pubring.gpg"
            " > {}/etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab".format(
                SYSIMG_PATH, SYSIMG_PATH), log)
        utils.sysexec_chroot(
            SYSIMG_PATH, "rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab",
            log)

    # keep a log on the installed hdd
    stamp = open(SYSIMG_PATH + "/bm-install.txt", 'a')
    now = time.strftime("%Y-%b-%d @ %H:%M %Z", time.gmtime())
    stamp.write("Hard drive installed by BootManager {}\n".format(VERSION))
    stamp.write("Finished extraction of bootstrapfs on {}\n".format(now))
    # do not modify this, the upgrade code uses this line for checking compatibility
    stamp.write("Using nodefamily {}\n".format(nodefamily))
    stamp.close()

    return 1
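
# utils.check_file_hash() is not shown. A minimal sketch, assuming the
# .sha1sum file stores the expected digest in its first whitespace-separated
# field (the usual sha1sum output format):
import hashlib

def check_file_hash(dest_file, dest_hash_file):
    """Return True when dest_file's SHA1 digest matches the recorded value."""
    sha1 = hashlib.sha1()
    with open(dest_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 16), b""):
            sha1.update(chunk)
    with open(dest_hash_file) as f:
        expected = f.read().split()[0]
    return sha1.hexdigest() == expected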
Example no. 17
    except ValueError, var:
        raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var

    # mkinitrd needs /dev and /proc to do the right thing.
    # /proc is already mounted, so bind-mount /dev here
    # xxx tmp - trying to work around the f14 case:
    # check that /dev/ is mounted with devtmpfs
    # tmp - sysexec_noerr not returning what one would expect
    # if utils.sysexec_noerr ("grep devtmpfs /proc/mounts") != 0:
    utils.sysexec_noerr("mount -t devtmpfs none /dev")
    utils.sysexec("mount -o bind /dev %s/dev" % SYSIMG_PATH)
    utils.sysexec("mount -t sysfs none %s/sys" % SYSIMG_PATH)

    initrd, kernel_version = systeminfo.getKernelVersion(vars, log)
    try:
        utils.removefile("%s/boot/%s" % (SYSIMG_PATH, initrd))
    except:
        print "%s/boot/%s is already removed" % (SYSIMG_PATH, initrd)

    # hack for CentOS 5.3
    bypassRaidIfNeeded(SYSIMG_PATH, log)
    # specify ext3 for fedora14 and above as their default fs is ext4
    utils.sysexec_chroot(
        SYSIMG_PATH,
        "mkinitrd -v --with=ext3 --allow-missing /boot/initrd-%s.img %s" % (kernel_version, kernel_version),
        log,
    )

    utils.sysexec_noerr("umount %s/sys" % SYSIMG_PATH, log)
    utils.sysexec_noerr("umount %s/dev" % SYSIMG_PATH, log)
Example no. 18
    def predictWorkflow(self, molecules, detail, progress):

        # if this is a submodel
        if self.vpath[-9:-4] == 'local':
            return (model.predictWorkflow(self, molecules, detail, progress))

        # if this is the top model

        tdir = tempfile.mkdtemp(dir='/var/tmp')

        BASEDIR = '/home/modeler/soft/eTOXlab/src/'
        result = ''

        fpickle = open(self.vpath + '/cutoffs.pkl', 'rb')
        nchunks = pickle.load(fpickle)
        fpickle.close()

        # guess endpoint tag and version from vpath
        itag = self.vpath.split('/')[-2]
        ver = int(self.vpath.split('/')[-1][-4:])

        # split original SDFile and create 'nchunks' files called 'piece0000.sdf'...
        success, results, resultsOrder = self.splitQuery(molecules, tdir)
        if not success:
            return (False, results)

        aggregatedResults = []
        sys.path.append('/opt/RDKit/')
        sys.path.append('/opt/standardiser/standardise20140206/')

        # make 'nchunks' calls to the 'predict' command using the respective pieces
        for ichunk in range(nchunks):

            tfile = tdir + '/query%0.4d.sdf' % ichunk
            if not os.path.isfile(tfile):
                continue

            call = [
                '/usr/bin/python', BASEDIR + 'predict.py', '-e', itag, '-v',
                str(ver), '-f', tfile, '-s',
                str(ichunk)
            ]

            retcode = subprocess.call(call)

            removefile(tfile)

            if retcode != 0: return (False, 'prediction computation failed')

            f = open('results.pkl', 'rb')
            iresult = pickle.load(f)
            f.close()

            if not iresult[0]:
                return (False, iresult[1])

            for oi, iri in zip(resultsOrder[ichunk], iresult[1]):
                aggregatedResults.append((oi, iri))

        #process output and reorder the results

        aggregatedResults.sort()

        try:
            shutil.rmtree(tdir)
        except:
            pass

        results = []
        for ri in aggregatedResults:
            results.append(ri[1])

        return (True, results)
Example no. 19
        if result:
            # Download SHA1 checksum file
            log.write("downloading sha1sum for %s\n" % source_file)
            result = bs_request.DownloadFile(source_hash_file, None, None, 1, 1, dest_hash_file, 30, 14400)

            log.write("verifying sha1sum for %s\n" % source_file)
            if not utils.check_file_hash(dest_file, dest_hash_file):
                raise BootManagerException, "FATAL: SHA1 checksum does not match between %s and %s" % (
                    source_file,
                    source_hash_file,
                )

            log.write("extracting %s in %s\n" % (dest_file, SYSIMG_PATH))
            result = utils.sysexec("tar -C %s -xpf %s %s" % (SYSIMG_PATH, dest_file, uncompress_option), log)
            log.write("Done\n")
            utils.removefile(dest_file)
        else:
            # the main tarball is required
            if name == nodefamily:
                raise BootManagerException, "FATAL: Unable to download main tarball %s from server." % source_file
            # for extensions, just print a warning
            else:
                log.write("WARNING: tarball for extension %s not found\n" % (name))

    # copy resolv.conf from the base system into our temp dir
    # so DNS lookups work correctly while we are chrooted
    log.write("Copying resolv.conf to temp dir\n")
    utils.sysexec("cp /etc/resolv.conf %s/etc/" % SYSIMG_PATH, log)

    # Copy the boot server certificate(s) and GPG public key to
    # /usr/boot in the temp dir.