Example #1
def scattercisr(xs, ys, amoeboids, mesenchymals, labels = None, xlabel = None, ylabel = None, xlabels=None, title=None, legend=None, showGrid=False, folder=None, savefile=None, **plotargs):
    assert len(xs)==len(ys), "xs and ys don't have the same length"
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid(showGrid)
    handles = []
    
    h0 = ax.scatter(xs[mesenchymals], ys[mesenchymals], color='g', marker='o', s=7, label="mesenchymals")
    h1 = ax.scatter(xs[amoeboids], ys[amoeboids], color='b', marker='o', s=7, label="amoeboids")
    handles.append(h0)
    handles.append(h1)
    ax.legend(loc=10)
    if legend is not None:
        ax.legend(handles, legend, loc=10)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if title is not None:
        ax.set_title(title)
    if "xlim" in plotargs.keys():
        ax.set_xlim(plotargs["xlim"])
    if "ylim" in plotargs.keys():
        ax.set_ylim(plotargs["ylim"])
    if folder is None:
        folder = ""
    if savefile is not None:
        savefilepath = join(folder, savefile)
        plt.savefig(savefilepath)
        info(savefile + " written.")
    if savefile is None:
        plt.show()
    plt.close()
Example #2
 def checkout(self, runLogDir):
     utils.info(
             'Attempting to checkout Lucene/Solr revision: %s into directory: %s' % (
                 self.revision, self.checkoutDir))
     if not os.path.exists(self.checkoutDir):
         os.makedirs(self.checkoutDir)
     f = os.listdir(self.checkoutDir)
     x = os.getcwd()
     try:
         os.chdir(self.checkoutDir)
         if len(f) == 0:
             # clone
             if self.revision == 'LATEST':
                 utils.runCommand(
                         '%s clone --progress http://git-wip-us.apache.org/repos/asf/lucene-solr.git . > %s/checkout.log.txt 2>&1' % (
                             constants.GIT_EXE, runLogDir))
             else:
                 utils.runCommand(
                         '%s clone --progress http://git-wip-us.apache.org/repos/asf/lucene-solr.git .  > %s/checkout.log.txt 2>&1' % (
                             constants.GIT_EXE, runLogDir))
                 self.updateToRevision(runLogDir)
             utils.runCommand('%s ivy-bootstrap' % constants.ANT_EXE)
         else:
             self.updateToRevision(runLogDir)
     finally:
         os.chdir(x)
Example #3
def bars_stacked(y0, color0, y1, color1, left, width, y_bars=None, labels = None, xlabel = None, ylabel = None, xlabels=None, title=None, legend=None, showGrid=False, folder=None, savefile=None, **plotargs):
    assert len(y0)==len(y1), "y0 and y1 don't have the same length"
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid(showGrid)
    handles = []
    
    h0 = ax.bar(left, y0, width, color=color0, label="successful")
    h1 = ax.bar(left, y1, width, bottom=y0, color=color1, label="unsuccessful")
    handles.append(h0)
    handles.append(h1)
    ax.legend()
    if legend is not None:
        ax.legend(handles, legend, loc=0)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if title is not None:
        ax.set_title(title)
    if "xlim" in plotargs.keys():
        ax.set_xlim(plotargs["xlim"])
    if "ylim" in plotargs.keys():
        ax.set_ylim(plotargs["ylim"])
    if folder is None:
        folder = ""
    if savefile is not None:
        savefilepath = join(folder, savefile)
        plt.savefig(savefilepath)
        info(savefile + " written.")
    if savefile is None:
        plt.show()
    plt.close()
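A hypothetical call to the function above, with made-up data, showing the conventions it follows: legend replaces the per-bar labels, xlim is picked up through **plotargs, and passing savefile writes the figure instead of showing it.

# Hypothetical data; bars_stacked is the function defined above.
successful = [3, 5, 2]
unsuccessful = [1, 0, 4]
left = [0, 1, 2]
bars_stacked(successful, 'g', unsuccessful, 'r', left, 0.8,
             xlabel="run", ylabel="count", title="Outcomes",
             legend=["successful", "unsuccessful"],
             folder="plots", savefile="outcomes.png",  # plots/ assumed to exist
             xlim=(-0.5, 2.5))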
Example #4
File: vm.py Project: AmesianX/chef
    def export(self, targz: str, **kwargs: dict):
        if not os.path.isdir(self.path):
            utils.fail("%s: VM does not exist" % self.name)
            exit(1)
        if not targz:
            targz = '%s.tar.gz' % self.name
        targz = os.path.abspath(targz)
        utils.info("exporting to %s" % targz)
        tar = '%s/%s' % (self.path, os.path.basename(os.path.splitext(targz)[0]))
        if os.path.exists(targz):
            utils.fail("%s: package already exists" % targz)
            exit(1)

        os.chdir(self.path)  # create intermediate files in VM's path

        utils.pend("package disk image")
        utils.execute(['tar', '-cSf', tar, os.path.basename(self.path_raw)])
        utils.ok()

        for s in self.snapshots:
            utils.pend("package snapshot: %s" % s)
            local_snapshot = '%s.%s' % (os.path.basename(self.path_raw), s)
            utils.execute(['tar', '-rf', tar, local_snapshot])
            utils.ok()

        utils.pend("compress package", msg="may take some time")
        utils.execute(['gzip', '-c', tar], outfile=targz)
        utils.ok()

        utils.pend("clean up")
        os.unlink(tar)
        utils.ok()

        self.scan_snapshots()
Example #5
 def __init__(self, const, noDelete=False):
     """Constructs the ``Simulation`` object according to ``const`` and creates an empty directory for the results."""
     self.const = const
     
     self.path = os.getcwd() + "/"
     self.resultsdir = os.path.join(self.path, constants.resultspath, const["name"])
     
     if not os.path.exists(self.resultsdir):
         os.mkdir(self.resultsdir)
     if noDelete==False:
         utils.makeFolderEmpty(self.resultsdir)
         
     self.N_amoeboid = self.const["N_amoeboid"]
     self.N_mesenchymal = self.const["N_mesenchymal"]
     self.N = self.N_amoeboid + self.N_mesenchymal
     self.NNN = int(const["max_time"] / const["dt"])
     
     self.retainCompleteDataset = utils.retainCompleteDataset(const)
     
     if noDelete==False:
         self.dsA = Dataset(Dataset.ARRAYS, const["max_time"], const["dt"], self.N, constants.DIM, self.resultsdir, fileprefix=None)
         if self.retainCompleteDataset:
             info("Created dataset of size %s" % self.dsA.getHumanReadableSize())
         else:
             info("Created temporary dataset of size %s" % self.dsA.getHumanReadableSize())
Example #6
def setup_rootfs():
    buildkernel(e('${KERNCONF}-DEBUG'), ['mach'], buildkernellog)
    installworld('${OBJDIR}/test-root', installworldlog, distributionlog, conf="run")
    installkernel(e('${KERNCONF}'), '${OBJDIR}/test-root', installkernellog, modules=['mach'], conf="run")
    info('Installing overlay files')
    sh('rsync -ah ${TESTS_ROOT}/trueos/overlay/ ${OBJDIR}/test-root')
    sh('makefs -M ${IMAGE_SIZE} ${OBJDIR}/test-root.ufs ${OBJDIR}/test-root')
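The e(), sh() and info() helpers come from the build system's DSL and are not shown in this excerpt. A minimal sketch of the expansion idea, assuming plain os.environ lookups (the real DSL also resolves variables from its own config scope):

import os
import string
import subprocess

def e(template):
    # Expand ${VAR} placeholders against the environment (simplified sketch).
    return string.Template(template).safe_substitute(os.environ)

def sh(*parts):
    # Expand each fragment, join into one command line, run via the shell.
    subprocess.check_call(' '.join(e(p) for p in parts), shell=True)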
Example #7
def plot(x, y, labels = None, xlabel = None, ylabel = None, xlabels=None, title=None, legend=None, showGrid=False, folder=None, savefile=None):
    assert len(x)==len(y), "x and y don't have the same length"
    assert len(x)>0 and type(x)==list
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid(showGrid)
    handles = []
    for i, _ in enumerate(x):
        handles.append(ax.plot(x[i], y[i], linestyle=':', marker='s')[0])
        #That label stuff doesn't yet work as it should.
        if labels is not None:
            ax.text(x[i][0], y[i][0], labels[i], horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
    if legend is not None:
        ax.legend(handles, legend, loc=0)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if title is not None:
        ax.set_title(title)
    if folder is None:
        folder = ""
    if savefile is not None:
        plt.savefig(join(folder, savefile))
        info(savefile + " written.")
    if savefile is None:
        plt.show()
    plt.close()
Example #8
    def output(self, output_formats, **output_options):
        """
        output all results to appropriate URLs
        - output_formats: a dict mapping formats to a list of URLs
        - output_options: a dict mapping formats to options for each format
        """

        utils.info("Outputting talos results => %s", output_formats)
        tbpl_output = {}
        try:

            for key, urls in output_formats.items():
                options = output_options.get(key, {})
                _output = output.formats[key](self, **options)
                results = _output()
                for url in urls:
                    _output.output(results, url, tbpl_output)

        except utils.talosError, e:
            # print to results.out
            try:
                _output = output.GraphserverOutput(self)
                results = _output()
                _output.output('file://%s' % os.path.join(os.getcwd(), 'results.out'), results)
            except:
                pass
            print '\nFAIL: %s' % e.msg.replace('\n', '\nRETURN:')
            raise e
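Following the docstring, output_formats maps each format name to a list of URLs and output_options maps the same names to per-format options. A hypothetical invocation (the format names and option keys are illustrative, not confirmed from the talos source; talos_results stands in for the results object):

output_formats = {
    'datazilla_urls': ['https://datazilla.mozilla.org/talos'],
    'graphserver_urls': ['file:///tmp/results.out'],
}
output_options = {
    'datazilla_urls': {'authfile': 'oauth.txt'},  # hypothetical option
}
talos_results.output(output_formats, **output_options)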
Example #9
def _run_theta_single(name, debug):
    cache_dir = os.path.join(global_config.workdir, 'cache')
    cfgfile = name + '.cfg'
    dbfile = name + '.db'
    cfgfile_cache = os.path.join(cache_dir, cfgfile)
    dbfile_cache = os.path.join(cache_dir, dbfile)
    cfgfile_full = os.path.join(global_config.workdir, cfgfile)
    already_done = False
    theta = os.path.realpath(os.path.join(global_config.theta_dir, 'bin', 'theta'))
    if os.path.exists(cfgfile_cache) and os.path.exists(os.path.join(cache_dir, dbfile)):
        # compare the config files:
        already_done = open(cfgfile_cache, 'r').read() == open(cfgfile_full, 'r').read()
    if already_done:
        utils.info("Skipping 'theta %s': found corresponding output file in cachedir" % cfgfile)
        return
    utils.info("Running 'theta %s'" % cfgfile)
    params = ""
    #if debug: params += " --redirect-io=False"
    retval = os.system(theta + params + " " + cfgfile_full)
    if retval != 0:
        if os.isatty(1):
                attr = termios.tcgetattr(1)
                attr[3] |= termios.ECHO
                termios.tcsetattr(1, termios.TCSANOW, attr)
        if os.path.exists(dbfile) and not debug: os.unlink(dbfile)
        raise RuntimeError, "executing theta for cfg file '%s' failed with exit code %d" % (cfgfile, retval)
    # move to cache, also the config file ...
    shutil.move(dbfile, dbfile_cache)
    shutil.copy(cfgfile_full, cfgfile_cache)
Example #10
def setup_network():
    global tapdev

    info('Configuring VM networking')
    tapdev = sh_str('ifconfig tap create')
    info('Using tap device {0}', tapdev)
    sh('ifconfig ${tapdev} inet ${HOST_IP} ${NETMASK} up')
Example #11
    def unbindFeature(self, qgsfeature, editingmode=False):
        """
        Unbinds the feature from the form saving the values back to the QgsFeature.

        qgsfeature -- A QgsFeature that will store the new values.
        """
        savefields = []
        for index, control in self.fieldtocontrol.items():
            value = QVariant()
            if isinstance(control, QDateTimeEdit):
                value = control.dateTime().toString(Qt.ISODate)
            else:
                if self.layer.editType(index) == QgsVectorLayer.UniqueValues and control.isEditable():
                    # Due to http://hub.qgis.org/issues/7012 we can't have editable
                    # combo boxes using QgsAttributeEditor. If the value isn't in the
                    # dataset already it will return null. Until that bug is fixed
                    # we are just going to handle it ourselves.
                    value = control.currentText()
                else:
                    modified = QgsAttributeEditor.retrieveValue(control, self.layer, index, value)

            info("Setting value to %s from %s" % (value, control.objectName()))
            qgsfeature.changeAttribute(index, value)

            # Save the value to the database as a default if it is needed.
            if self.shouldSaveValue(control):
                savefields.append(index)

        if not editingmode:
            m = qgsfeature.attributeMap()
            fields_map = self.layer.pendingFields()
            attr = {str(fields_map[k].name()): str(v.toString()) for k, v in m.items() if k in savefields}
            self.form.setSavedValues(attr)

        return qgsfeature
Example #12
	def _update_from_iter(self):
		if self.it != None:
			self.file = self.tree.get_model().get_value(self.it, 1)
			
			if self.tree.get_model().get_value(self.it, 9):
				
				# FIXME: Check whether the file already exists (i.e. we downloaded it previously)
				tmp = os.path.join(utils.UPDT_DIR, self.file)
				
				if os.path.exists(tmp):
					# Check that the file is correct
					bytes = os.path.getsize(tmp)
					md5   = generate.Generator.checksum(tmp)
					
					if md5 != self.tree.get_model().get_value(self.it, 4) or int(bytes) != self.tree.get_model().get_value(self.it, 3):
						os.remove(tmp)
						self._thread(self._update_file, utils.url_encode(BASE_DIR + self.file))
					else:
						self._update_percentage()
						self._go_with_next_iter()
				else:
					self._thread(self._update_file, utils.url_encode(BASE_DIR + self.file))
			else:
				self._update_percentage()
				self._go_with_next_iter()
		else:
			self.xml_util.dump_tree_to_file(self.diff_object, os.path.join(utils.UPDT_DIR, ".diff.xml"))
			
			utils.info(_("Riavvia per procedere all'aggiornamento di PyAcqua"))
			
			self.destroy()
Example #13
def main():
    user = sh_str('id -un')
    if user == 'root':
        user = '******'

    # sh('ssh ${user}@${DOWNLOAD_HOST} rm -rf ${DOWNLOAD_TARGETDIR}')
    # sh('ssh ${user}@${DOWNLOAD_HOST} mkdir -p ${DOWNLOAD_TARGETDIR}')
    # sh('scp -pr ${RELEASE_STAGEDIR}/* ${user}@${DOWNLOAD_HOST}:${DOWNLOAD_TARGETDIR}/')
    ref_date = 0
    rel_dir = ''
    dirstring = e('${BE_ROOT}/release/${PRODUCT}')
    for x in glob.glob("{0}*".format(dirstring)):
        if e('${BUILD_ARCH_SHORT}') not in os.listdir(x):
            continue

        if os.lstat(x).st_ctime > ref_date:
            ref_date = os.lstat(x).st_ctime
            rel_dir = x

    if not rel_dir:
        error('Release not found')

    if e('${BUILD_TYPE}').lower() in ["master", "stable"]:
        buildtimestamp = os.path.basename(rel_dir).split("-")[-1]
        downloadtargetdir = e('${DOWNLOAD_BASEDIR}/${MILESTONE}/${buildtimestamp}')
    else:
        downloadtargetdir = e('${DOWNLOAD_TARGETDIR}')
    sh('ssh ${user}@${DOWNLOAD_HOST} rm -rf ${downloadtargetdir}')
    sh('ssh ${user}@${DOWNLOAD_HOST} mkdir -p ${downloadtargetdir}')
    sh('scp -pr ${rel_dir}/* ${user}@${DOWNLOAD_HOST}:${downloadtargetdir}/')
    info('Synchronizing download server to CDN')
    sh('ssh ${user}@${DOWNLOAD_HOST} /usr/local/sbin/rsync-mirror.sh')
Example #14
def make_build_env():
    env = dict(os.environ)
    libsearpc_dir = abspath(join(TOPDIR, 'libsearpc'))
    ccnet_dir = abspath(join(TOPDIR, 'ccnet'))

    def _env_add(*a, **kw):
        kw['env'] = env
        return prepend_env_value(*a, **kw)

    _env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ')

    _env_add('LDFLAGS', '-L%s' % os.path.join(PREFIX, 'lib'), seperator=' ')

    _env_add('LDFLAGS', '-L%s' % os.path.join(PREFIX, 'lib64'), seperator=' ')

    _env_add('PATH', os.path.join(PREFIX, 'bin'))
    _env_add('PATH', THIRDPARTDIR)
    _env_add('PKG_CONFIG_PATH', os.path.join(PREFIX, 'lib', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', os.path.join(PREFIX, 'lib64', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', libsearpc_dir)
    _env_add('PKG_CONFIG_PATH', ccnet_dir)

    for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS',
                'PYTHONPATH'):
        info('%s: %s', key, env.get(key, ''))
    return env
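prepend_env_value itself is not part of the excerpt. A minimal sketch consistent with the call sites above (which spell the keyword 'seperator'):

import os

def prepend_env_value(name, value, env=None, seperator=':'):
    # Prepend value to env[name], creating the variable if unset.
    env = os.environ if env is None else env
    current = env.get(name, '')
    env[name] = value + seperator + current if current else value
    return env[name]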
Example #15
def setup_server(cfg, db):
    '''Set up the seafile server with the setup-seafile.sh script. We use
    pexpect to interact with the script's setup process.
    '''
    info('uncompressing server tarball')
    shell('tar xf seafile-server_{}_x86-64.tar.gz -C {}'
          .format(cfg.version, cfg.installdir))
    if db == 'mysql':
        autosetup_mysql(cfg)
    else:
        autosetup_sqlite3(cfg)

    with open(join(cfg.installdir, 'conf/seahub_settings.py'), 'a') as fp:
        fp.write('\n')
        fp.write('DEBUG = True')
        fp.write('\n')
        fp.write('''\
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_RATES': {
        'ping': '600/minute',
        'anon': '1000/minute',
        'user': '******',
    },
}''')
        fp.write('\n')
Example #16
def override_profile_config(config):
    # local import may be needed to avoid circular import issues
    from utils import info
    override = {k.replace(DSL_PREFIX, ''): v for k, v in os.environ.items() if k.startswith(DSL_PREFIX)}
    for k, v in override.items():
        dest = None
        split_keys = k.split('.')
        try:
            for subkey in split_keys[:-1]:
                dest = dest or config
                if isinstance(dest, dict):
                    dest = dest[subkey]
                elif isinstance(dest, list):
                    for dictitem in dest:
                        if dictitem['name'] == subkey:
                            dest = dictitem
                            break
                    else:
                        raise KeyError(subkey)
            if dest and isinstance(dest, dict):
                dest[split_keys[-1]] = v
            else:
                # means that the split resulted in a single element
                # thus we can just use the original key
                config[k] = v
            info('Overriding {0}{1} build config var to: {2}'.format(DSL_PREFIX, k, v))
        except KeyError as e:
            # this key does not exist in the build config
            # moving on post logging this
            info('{0}{1} is not a proper build config key! KeyError for {2}'.format(DSL_PREFIX, k, e))
    return config
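For example, assuming DSL_PREFIX is 'DSL_', an environment variable with a dotted key walks into the nested config; note the override value stays a string:

import os

DSL_PREFIX = 'DSL_'  # assumed value, for illustration only
os.environ[DSL_PREFIX + 'train.batch_size'] = '64'

config = {'train': {'batch_size': 32, 'epochs': 10}}
config = override_profile_config(config)
# config['train']['batch_size'] is now the string '64'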
Example #17
File: run.py Project: AmesianX/chef
def kill_me_later(timeout, extra_time=60):
    pid = os.getpid()
    if os.fork() != 0:
        return
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    int_deadline = datetime.now() + timedelta(seconds=timeout)
    kill_deadline = int_deadline + timedelta(seconds=extra_time)
    int_sent = False
    while True:
        time.sleep(1)
        now = datetime.now()
        try:
            if now < int_deadline:
                os.kill(pid, 0)  # Just poll the process
            elif now < kill_deadline:
                utils.info("execution timeout reached, interrupting")
                os.kill(pid, signal.SIGINT if not int_sent else 0)
                int_sent = True
            else:
                utils.info("execution timeout reached, killing")
                os.kill(pid, signal.SIGKILL)
                break
        except OSError:  # The process terminated
            break
    exit(0)
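A typical use of the watchdog above: arm it before a run that might hang, so the parent process receives SIGINT after timeout seconds and SIGKILL extra_time seconds later (run_long_command is a hypothetical stand-in):

kill_me_later(300, extra_time=60)   # forked watchdog now polls this process
try:
    run_long_command()              # hypothetical long-running work
except KeyboardInterrupt:
    utils.info("interrupted by watchdog, cleaning up")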
Example #18
def upload(directory):
    """Upload a directory to S3.

    DIRECTORY: Directory to upload. Required.
    """
    if not AWS_BUCKET:
        utils.error('AWS_BUCKET environment variable not set. Exiting.')
        return

    conn = S3Connection()
    bucket = get_or_create_bucket(conn, AWS_BUCKET)

    files = list(utils.get_files(directory))
    total_size = 0

    utils.info('Found', len(files), 'files to upload to s3://' + AWS_BUCKET)

    for path in files:
        filesize = os.path.getsize(path)
        total_size += filesize

        utils.info('Uploading', path, '-', sizeof_fmt(filesize))

        k = Key(bucket)
        k.key = path
        k.set_contents_from_filename(path)

    utils.success('Done. Uploaded', sizeof_fmt(total_size))
Example #19
def symfony_fix_permissions():
    with settings(warn_only=True):
        if run('test -d %s/app/cache' % env.project_path).succeeded:
            info('fixing cache and logs permissions')
            www_user = run('ps aux | grep -E \'nginx\' | grep -v root | head -1 | cut -d\  -f1')
            sudo('setfacl -R -m u:%(www_user)s:rwX -m u:$(whoami):rwX %(project_path)s/app/cache %(project_path)s/app/logs' % { 'www_user': www_user, 'project_path': env.project_path })
            sudo('setfacl -dR -m u:%(www_user)s:rwX -m u:$(whoami):rwX %(project_path)s/app/cache %(project_path)s/app/logs' % { 'www_user': www_user, 'project_path': env.project_path })
Example #20
def autosetup_mysql(cfg):
    createdbs()
    setup_script = get_script(cfg, 'setup-seafile-mysql.sh')
    info('setting up seafile server with pexpect, script %s', setup_script)
    if not exists(setup_script):
        print 'please specify seafile script path'
    answers = [
        ('ENTER', ''),
        # server name
        ('server name', 'my-seafile'),
        # ip or domain
        ('ip or domain', '127.0.0.1'),
        # seafile data dir
        ('seafile-data', ''),
        # fileserver port
        ('seafile fileserver', ''),
        # use existing
        ('choose a way to initialize seafile databases', '2'),
        ('host of mysql server', ''),
        ('port of mysql server', ''),
        ('Which mysql user', 'seafile'),
        ('password for mysql user', 'seafile'),
        ('ccnet database', 'ccnet-existing'),
        ('seafile database', 'seafile-existing'),
        ('seahub database', 'seahub-existing'),
        ('ENTER', ''),
    ]
    _answer_questions(abspath(setup_script), answers)
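_answer_questions is not shown in this excerpt. Given the (prompt substring, reply) pairs above, a minimal pexpect-based sketch might look like this; treating each prompt as a pattern to wait for, and matching 'ENTER' against the script's "Press [ENTER] to continue" prompts, are assumptions:

import pexpect

def _answer_questions(script, answers):
    # Wait for each expected prompt in order and send the canned reply.
    child = pexpect.spawn('bash %s' % script, timeout=300)
    for prompt, reply in answers:
        child.expect(prompt)
        child.sendline(reply)
    child.expect(pexpect.EOF)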
Example #21
def GetSmoothingFactor(points, hRefmethod, modifier, proportionAmount):
    hRef = 0
    if hRefmethod.lower() == "worton":
        hRef = HrefWorton(points)
    elif hRefmethod.lower() == "tufto":
        hRef = HrefTufto(points)
    elif hRefmethod.lower() == "silverman":
        hRef = HrefSilverman(points)
    elif hRefmethod.lower() == "gaussian":
        hRef = HrefGaussianApproximation(points)
    elif not hRefmethod or hRefmethod == "#":
        hRef = HrefWorton(points)

    if hRef == 0:
        utils.die("No valid hRef method was provided. Quitting.")

    if modifier.lower() == "proportion":
        h = proportionAmount * hRef
    elif modifier.lower() == "lscv":
        h = Minimize(LSCV, hRef, points)
    elif modifier.lower() == "bcv2":
        h = Minimize(BCV2, hRef, points)
    else:
        h = hRef

    utils.info("hRef (" + hRefmethod + ") = " + str(hRef))    
    utils.info("Using h = " +  str(h))
    return h
Example #22
def run_wiki_4k_schema_bench(start, tgz, runLogDir, perfFile, gcFile):
    # we start in schemaless mode but use the schema api to add the right fields
    jmx_args = ' '.join(['-Dcom.sun.management.jmxremote',
                '-Dcom.sun.management.jmxremote.port=9999',
                '-Dcom.sun.management.jmxremote.authenticate=false',
                '-Dcom.sun.management.jmxremote.ssl=false'])
    server = SolrServer(tgz, '%s/wiki-4k-schema' % constants.BENCH_DIR, example='schemaless', memory='4g', jvm_args=jmx_args)
    server.extract(runLogDir)
    try:
        bench = JavaBench(os.getcwd())
        bench.compile(server, runLogDir)

        server.start(runLogDir)
        time.sleep(5)

        solrMajorVersion, solrImplVersion = server.get_version()

        solrUrl = 'http://%s:%s/solr/gettingstarted' % (server.host, server.port)

        utils.info('Updating schema')
        schemaApiUrl = '%s/schema' % solrUrl
        r = requests.post(schemaApiUrl,
                          data='{"add-field":{"name":"title","type":"string","stored":false, "indexed":true },'
                               '"add-field":{"name":"titleTokenized","type":"text_en","stored":true, "indexed":true },'
                               '"add-field":{"name":"body","type":"text_en","stored":false, "indexed":true },'
                               '"add-field":{"name":"date","type":"date","stored":true, "indexed":true },'
                               '"add-field":{"name":"timesecnum","type":"tint","stored":false, "indexed":true },'
                               '"add-copy-field":{"source":"title","dest":[ "titleTokenized"]},'
                               '"delete-copy-field":{ "source":"*", "dest":"_text_"}}')
        print r.json()

        logFile = '%s/wiki-4k-schema.log.txt' % runLogDir

        bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak = bench.run('wiki-4k-schema', server,
                                                            'org.apache.solr.perf.WikiIndexer',
                                                            [
                                                                # '-useHttpSolrClient', '-solrUrl', solrUrl,
                                                                '-useConcurrentUpdateSolrClient', '-solrUrl', solrUrl,
                                                                '-lineDocsFile', constants.WIKI_4K_DATA_FILE,
                                                                '-docCountLimit', '-1',
                                                                '-threadCount', '9',
                                                                '-batchSize', '100'], logFile)

        if docsIndexed != constants.WIKI_4k_NUM_DOCS:
            raise RuntimeError(
                    'Indexed num_docs do not match expected %d != found %d' % (constants.WIKI_4k_NUM_DOCS, docsIndexed))

        timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
            start.year, start.month, start.day, start.hour, start.minute, start.second)
        with open(perfFile, 'a+') as f:
            f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                timeStampLoggable, bytesIndexed, docsIndexed, indexTimeSec, solrMajorVersion, solrImplVersion))

        write_gc_file(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, times, garbage, peak)

        return bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak
    finally:
        server.stop()
        time.sleep(5)
Example #23
 def copy_dist(self):
     self.make_dist()
     tarball = glob.glob('*.tar.gz')[0]
     info('copying %s to %s', tarball, SRCDIR)
     shell('cp {} {}'.format(tarball, SRCDIR))
     m = re.match('{}-(.*).tar.gz'.format(self.name), basename(tarball))
     if m:
         self.version = m.group(1)
Example #24
def cleandirs():
    info('Cleaning previous build products')
    if os.path.isdir(e('${INSTUFS_DESTDIR}')):
        sh('chflags -R 0 ${INSTUFS_DESTDIR}')
        sh('rm -rf ${INSTUFS_DESTDIR}')

    sh('rm -rf ${ISO_DESTDIR}')
    sh('mkdir -p ${ISO_DESTDIR} ${INSTUFS_DESTDIR}')
Example #25
def load_user_config():
    if not os.path.exists(CONFIG_PATH):
        info("Warning: {0} does not exist.".format(CONFIG_PATH))
        setup()
    config = load(CONFIG_PATH)
    if "url" not in config or "apikey" not in config:
        die('Configuration file not valid. Please run "dotcloud setup" to create it.')
    return config
Example #26
 def on_connect(self):
     info("received connection")
     self._connected = True
     self.loopstop.clear()
     self.hardwarestop.clear()
     self.done = False
     self.loopthd.start()
     self.hardwarethd.start()
Example #27
    def _on_skin_ok(self, widget):
        mod, it = self.view.get_selection().get_selected()

        if it != None:
            set("skin", mod.get_value(it, 0))
            save()
            utils.info(_("Devi riavviare per far si che tutte le modifiche siano applicate."))
            self._on_delete_event()
Example #28
 def compile(self, server, runLogDir):
     buildDir = self.build_dir()
     if not os.path.exists(buildDir):
         os.makedirs(buildDir)
     cmd = ['javac', '-d', buildDir, '-classpath', ':'.join(server.get_jars())]
     cmd.extend(self.src_files())
     utils.info('Running: %s' % ' '.join(cmd))
     utils.runComand('javac', cmd, os.path.join(runLogDir, 'java-bench-compile.log.txt'))
Example #29
 def do_ls(self, lb_name):
     """ List all available load balancers
     """
     if not lb_name and not self._lb:
         for lb in self.conn.get_all_load_balancers():
             print info(lb.name)
     else:
         self.do_status(lb_name)
Example #30
def merge_port_trees():
    for i in config['port_trees']:
        info(e('Merging ports tree ${i}'))
        for p in glob('${i}/*/*'):
            portpath = '/'.join(p.split('/')[-2:])
            sh('rm -rf ${PORTS_OVERLAY}/${portpath}')
            sh('mkdir -p ${PORTS_OVERLAY}/${portpath}')
            sh('cp -lr ${p}/ ${PORTS_OVERLAY}/${portpath}')
Example #31
    def fire(self, data=None):
        """Cause pipeline execution"""
        outputs = []
        with self.execution_lock:
            info(f"{self.trigger_name} is firing!")
            self.data_pool.clear_data()
            try:
                if data is not None:
                    self.package_data(data)
                info("Executing pipeline(s).")
                for pipeline in self.pipelines:
                    res = pipeline.run()
                    outputs.append(res)

                # squeeze
                if len(outputs) == 1:
                    outputs = outputs[0]
            except Exception as ex:
                self.handle_execution_exception(ex)
                outputs = {"ERROR": str(ex)}
            self.clean_up_data()

        return outputs
Example #32
    def run(self, data_pool):
        """Runs the chain"""
        info("-------------------")
        info("{} chain [{}]".format("Running", self.name))
        info("-------------------")

        data_pool.on_chain_start(self.get_name())
        # iterate the chain components
        for c, component in enumerate(self.components):
            info("||| Running component {}/{} : type: {} - name: {}".format(c + 1, self.num_components, component.get_component_name(), component.get_name()))
            component.assign_data_pool(data_pool)
            component.run()
            data_pool.on_component_completion(self.get_name(), component.get_name())
            data_pool.clear_feeders()
            data_pool.add_feeders(None, component.get_name())
        data_pool.on_chain_completion(self.get_name())
Example #33
def create_upgradefile():
    """
    Copy the manifest, and all other files, into a temp directory,
    then create a tarball from that.  We need to rename ${PRODUCT}-MANIFEST
    to simply MANIFEST, and all the Packages files go into the base directory.
    We'll name the resulting file ${PRODUCT}-${VERSION}.tar
    """
    info("Creating update tar-file")
    temp_dir = tempfile.mkdtemp()
    source_dir = e("${UPGRADE_STAGEDIR}")
    for entry in os.listdir(source_dir):
        if entry == e("${PRODUCT}-MANIFEST"):
            shutil.copyfile(os.path.join(source_dir, entry),
                            os.path.join(temp_dir, "MANIFEST"))
        elif entry == "Packages":
            for pkgfile in os.listdir(os.path.join(source_dir, entry)):
                shutil.copyfile(os.path.join(source_dir, entry, pkgfile),
                                os.path.join(temp_dir, pkgfile))
        else:
            shutil.copyfile(os.path.join(source_dir, entry),
                            os.path.join(temp_dir, entry))
    sh("tar -C {0} -cf {1} .".format(temp_dir, e("${BE_ROOT}/release/${PRODUCT}-${VERSION}-unsigned.tar")))
    shutil.rmtree(temp_dir)
Example #34
 def setup_nltk_resources(config):
     try:
         info("Probing WordNet...")
         wn.VERB
     except:
         info("Installing WordNet...")
         nltk_download(config, "wordnet")
         info("Probing WordNet...")
         wn.VERB
Example #35
def minimize_temperature_simulated_annealing_gradient(layout,
                                                      total_power_budget,
                                                      num_iterations):

    # Generate a valid random start
    random_start = generate_random_power_distribution(layout,
                                                      total_power_budget)
    utils.info(2, "\tGenerated a random start: " + str(random_start))

    # Define constraints
    constraints = ({
        'type': 'eq',
        'fun': lambda x: sum(x) - total_power_budget
    }, )

    # Define bounds (these seem to be ignored by the local minimizer - to investigate TODO)
    bounds = ()
    for i in range(0, layout.get_num_chips()):
        bounds = bounds + ((layout.get_chip().get_power_levels()[0],
                            layout.get_chip().get_power_levels()[-1]), )

    # Call the basinhoping algorithm with a local minimizer that handles constraints and bounds: SLSQP
    minimizer_kwargs = {
        "method": "SLSQP",
        "args": layout,
        "constraints": constraints,
        "bounds": bounds,
    }

    #ret = basinhopping(layout_temperature, random_start, minimizer_kwargs=minimizer_kwargs, niter=num_iterations, accept_test=MyBounds())
    ret = basinhopping(basinhopping_objective_layout_temperature,
                       random_start,
                       minimizer_kwargs=minimizer_kwargs,
                       niter=num_iterations)

    # sys.stderr.write("global minimum:  = %.4f,%.4f f(x0) = %.4f" % (ret.x[0], ret.x[1], ret.fun))
    return [ret.fun, list(ret.x)]
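For reference, a self-contained toy showing the same basinhopping-plus-SLSQP pattern with an equality constraint and bounds; the quadratic objective is a stand-in, not the layout temperature model:

import numpy as np
from scipy.optimize import basinhopping

total_power_budget = 10.0

def objective(x):
    return np.sum((x - 1.0) ** 2)  # stand-in objective

constraints = ({'type': 'eq', 'fun': lambda x: sum(x) - total_power_budget},)
bounds = ((0.0, 10.0),) * 4
minimizer_kwargs = {'method': 'SLSQP', 'constraints': constraints, 'bounds': bounds}

ret = basinhopping(objective, np.full(4, 2.5),
                   minimizer_kwargs=minimizer_kwargs, niter=50)
print(ret.fun, ret.x)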
Example #36
def prepare_APP_data_dir():
    """Creates a new APP root directory"""

    info('Preparing {0} root directory'.format(APP))
    nrd = APP_root_dir()
    # tgt_cfg = os.path.join(nrd, 'cfg', 'ngamsServer.conf')
    tgt_cfg = None
    res = run('mkdir {0}'.format(nrd))
    with cd(APP_source_dir()):
        for d in APP_DATAFILES:
            res = run('scp -r {0} {1}/.'.format(d, nrd), quiet=True)
        if res.succeeded:
            success("{0} data directory ready".format(APP))
            return tgt_cfg

    # Deal with the errors here
    error = '{0} root directory preparation under {1} failed.\n'.format(
        APP, nrd)
    if res.return_code == 2:
        error = (nrd + " already exists. Specify APP_OVERWRITE_ROOT to "
                 "overwrite, or a different APP_ROOT_DIR location")
    else:
        error = res
    abort(error)
Example #37
File: dsl.py Project: zfstor/build
def override_profile_config(config):
    # local import may be needed to avoid circular import issues
    from utils import info
    override = {
        k.replace(DSL_PREFIX, ''): v
        for k, v in os.environ.items() if k.startswith(DSL_PREFIX)
    }
    for k, v in override.items():
        dest = None
        split_keys = k.split('.')
        try:
            for subkey in split_keys[:-1]:
                dest = dest or config
                if isinstance(dest, dict):
                    dest = dest[subkey]
                elif isinstance(dest, list):
                    for dictitem in dest:
                        if dictitem['name'] == subkey:
                            dest = dictitem
                            break
                    else:
                        raise KeyError(subkey)
            if dest and isinstance(dest, dict):
                dest[split_keys[-1]] = v
            else:
                # means that the split resulted in a single element
                # thus we can just use the original key
                config[k] = v
            info('Overriding {0}{1} build config var to: {2}'.format(
                DSL_PREFIX, k, v))
        except KeyError as e:
            # this key does not exist in the build config
            # moving on post logging this
            info('{0}{1} is not a proper build config key! KeyError for {2}'.
                 format(DSL_PREFIX, k, e))
    return config
Example #38
    def __init__(self, results, authfile=None):
        Output.__init__(self, results)
        self.authfile = authfile
        self.oauth = None
        if authfile is not None:
            # get datazilla oauth credentials
            if '://' in authfile:  # authfile is a URL
                try:
                    contents = urllib.urlopen(authfile).read()
                    fd, authfile = tempfile.mkstemp(suffix='.py')
                    os.write(fd, contents)
                    os.close(fd)
                except Exception, e:
                    raise utils.talosError(str(e))

            assert os.path.exists(
                authfile), "Auth file not found: %s" % authfile
            module_name = 'passwords'
            module = imp.load_source(module_name, authfile)
            self.oauth = getattr(module, 'datazillaAuth', None)
            if self.oauth is None:
                utils.info(
                    "File '%s' does not contain datazilla oauth information",
                    authfile)
Example #39
    def output(self, results, results_url, tbpl_output):
        """output to the results_url
        - results : DatazillaResults instance
        - results_url : http:// or file:// URL
        """

        # print out where we're sending
        utils.info("Outputting datazilla results to %s", results_url)

        # parse the results url
        results_url_split = utils.urlsplit(results_url)
        results_scheme, results_server, results_path, _, _ = results_url_split

        if results_scheme in ('http', 'https'):
            self.post(results, results_server, results_path, results_scheme,
                      tbpl_output)
        elif results_scheme == 'file':
            f = file(results_path, 'w')
            f.write(json.dumps(results.datasets(), indent=2, sort_keys=True))
            f.close()
        else:
            raise NotImplementedError(
                "%s: %s - only http://, https://, and file:// supported" %
                (self.__class__.__name__, results_url))
Example #40
def cleanup_env():
    global poudriere_proc

    info('Cleaning up poudriere environment...')
    if poudriere_proc and poudriere_proc.poll() is None:
        try:
            poudriere_proc.terminate()
            poudriere_proc.wait()
        except OSError:
            info('Cannot kill poudriere, it has probably already terminated')

    if e('${USE_ZFS}'):
        info('Cleaning jail clean snapshot')
        sh('zfs destroy -r ${ZPOOL}${ZROOTFS}/jail@clean')

    info('Unmounting ports overlay...')

    if e("${SDK}") == "yes":
        info('SDK: Saving copy of ports tree...')
        sh('tar cJf ${BE_ROOT}/ports.txz --exclude .git -C ${PORTS_OVERLAY} .')

    sh('rm -rf ${PORTS_OVERLAY}')
    for cmd in jailconf.get('link', []):
        sh('umount -f', cmd['source'])
Example #41
 def sample(self, splits):
     # do sampling processing
     if self.do_sampling:
         for split_index, (tr, vl) in enumerate(splits):
             aug_indexes = []
             orig_tr_size = len(tr)
             ldaug = LabelledDataAugmentation()
             if self.sampling_method == defs.sampling.oversample:
                 for (label1, label2, ratio) in self.sampling_ratios:
                     aug_indexes.append(
                         ldaug.oversample_to_ratio(self.train,
                                                   self.train_labels,
                                                   [label1, label2],
                                                   ratio,
                                                   only_indexes=True,
                                                   limit_to_indexes=tr))
                     info(
                         "Sampled via {}, to ratio {}, for labels {},{}. Modification size: {} instances."
                         .format(self.sampling_method, ratio, label1,
                                 label2, len(aug_indexes[-1])))
                 aug_indexes = np.concatenate(aug_indexes)
                 tr = np.append(tr, aug_indexes)
                 info("Total size change: from {} to {} training instances".
                      format(orig_tr_size, len(tr)))
             elif self.sampling_method == defs.sampling.undersample:
                 for (label1, label2, ratio) in self.sampling_ratios:
                     aug_indexes.append(
                         ldaug.undersample_to_ratio(self.train_data,
                                                    self.train_labels,
                                                    [label1, label2],
                                                    ratio,
                                                    only_indexes=True))
                     info(
                         "Sampled via {}, to ratio {}, for labels {},{}. Modification size: {} instances."
                         .format(self.sampling_method, ratio, label1,
                                 label2, len(aug_indexes[-1])))
                 aug_indexes = np.concatenate(aug_indexes)
                 tr = np.delete(tr, aug_indexes)
                 info("Total size change: from {} to {} training instances".
                      format(orig_tr_size, len(tr)))
             else:
                 error(
                     "Undefined augmentation method: {} -- available are {}"
                     .format(self.sampling_method, defs.avail_sampling))
             splits[split_index] = (tr, vl)
         return splits
Example #42
def setup_mysql_prompt(setup_script):
    info('setting up seafile server with pexpect, script %s', setup_script)
    answers = [
        ('ENTER', ''),
        # server name
        ('server name', 'my-seafile'),
        # ip or domain
        ('ip or domain', '127.0.0.1'),
        # seafile data dir
        ('seafile-data', ''),
        # fileserver port
        ('seafile fileserver', ''),
        # use existing
        ('choose a way to initialize seafile databases', '2'),
        ('host of mysql server', ''),
        ('port of mysql server', ''),
        ('Which mysql user', 'seafile'),
        ('password for mysql user', 'seafile'),
        ('ccnet database', 'ccnet-existing'),
        ('seafile database', 'seafile-existing'),
        ('seahub database', 'seahub-existing'),
        ('ENTER', ''),
    ]
    _answer_questions(abspath(setup_script), answers)
Example #43
def setup_and_test(db, initmode):
    cfg = ServerConfig(
        installdir=INSTALLDIR,
        tarball=join(
            TOPDIR, 'seafile-server_{}_x86-64.tar.gz'.format(seafile_version)),
        version=seafile_version,
        initmode=initmode)
    info('Setting up seafile server with %s database', db)
    setup_server(cfg, db)
    # enable webdav, we're going to seafdav tests later
    shell('''sed -i -e "s/enabled = false/enabled = true/g" {}'''.format(
        join(INSTALLDIR, 'conf/seafdav.conf')))
    try:
        start_server(cfg)
        info('Testing seafile server with %s database', db)
        create_test_user(cfg)
        run_tests(cfg)
    except:
        for logfile in glob.glob('{}/logs/*.log'.format(INSTALLDIR)):
            shell('echo {0}; cat {0}'.format(logfile))
        for logfile in glob.glob('{}/seafile-server-{}/runtime/*.log'.format(
                INSTALLDIR, seafile_version)):
            shell('echo {0}; cat {0}'.format(logfile))
        raise
Example #44
    def create(config):
        """Function to instantiate a learning"""
        name = config.name
        candidates = [
            KMeansClusterer, NaiveBayes, Dummy, LogisticRegression, SVM
        ]
        # instantiate non-neural candidates
        for candidate in candidates:
            if name == candidate.name:
                return candidate(config)

        # instantiate neural candidates
        try:
            neural_wrapper_class = neural_instantiator.get_neural_wrapper_class(
                name)
            info(
                f"Parsed wrapper: {neural_wrapper_class.name} from learner name: {name}"
            )
            return neural_wrapper_class(config)
        except ValueError:
            # handled in the neural instantiator
            pass
        error("Undefined learning: {}. Available ones are: {}".format(
            name, candidates))
Example #45
    def InitializeNewProfile(self, profile_dir, browser_config):
        """Runs browser with the new profile directory, to negate any performance
            hit that could occur as a result of starting up with a new profile.  
            Also kills the "extra" browser that gets spawned the first time browser
            is run with a new profile.
            Returns 1 (success) if PROFILE_REGEX is found,
            and 0 (failure) otherwise

        Args:
            browser_config: object containing all the browser_config options
            profile_dir: The full path to the profile directory to load
        """
        INFO_REGEX = re.compile('__browserInfo(.*)__browserInfo', re.DOTALL|re.MULTILINE)
        PROFILE_REGEX = re.compile('__metrics(.*)__metrics', re.DOTALL|re.MULTILINE)

        command_args = utils.GenerateBrowserCommandLine(browser_config["browser_path"], 
                                                        browser_config["extra_args"], 
                                                        browser_config["deviceroot"],
                                                        profile_dir, 
                                                        browser_config["init_url"])

        if not browser_config['remote']:
            browser = talosProcess.talosProcess(command_args, env=os.environ.copy(), logfile=browser_config['browser_log'])
            browser.run()
            browser.wait()
            browser = None
            time.sleep(5)
        else:
            self.ffprocess.runProgram(browser_config, command_args, timeout=1200)

        res = 0
        if not os.path.isfile(browser_config['browser_log']):
            raise talosError("initalization has no output from browser")
        results_file = open(browser_config['browser_log'], "r")
        results_raw = results_file.read()
        results_file.close()
        match = PROFILE_REGEX.search(results_raw)
        if match:
            res = 1
        else:
            utils.info("Could not find %s in browser_log: %s", PROFILE_REGEX.pattern, browser_config['browser_log'])
            utils.info("Raw results:%s", results_raw)
            utils.info("Initialization of new profile failed")
        match = INFO_REGEX.search(results_raw)
        if match:
            binfo = match.group(1)
            print binfo
            for line in binfo.split('\n'):
                if line.strip().startswith('browser_name'):
                    browser_config['browser_name'] = line.split(':')[1]
                if line.strip().startswith('browser_version'):
                    browser_config['browser_version'] = line.split(':')[1]
                if line.strip().startswith('buildID'):
                    browser_config['buildid'] = line.split(':')[1]

        return res
Example #46
    def testCleanup(self, browser_config, profile_dir, test_config, cm,
                    temp_dir):
        try:
            if cm:
                cm.stopMonitor()

            if os.path.isfile(browser_config['browser_log']):
                results_file = open(browser_config['browser_log'], "r")
                results_raw = results_file.read()
                results_file.close()
                utils.info(results_raw)

            if profile_dir:
                try:
                    self.cleanupAndCheckForCrashes(browser_config, profile_dir,
                                                   test_config['name'])
                except talosError:
                    # ignore this error since we have already checked for crashes earlier
                    pass

            if temp_dir:
                self.cleanupProfile(temp_dir)
        except talosError, te:
            utils.debug("cleanup error: %s", te.msg)
Example #47
def make_build_env():
    env = dict(os.environ)
    libsearpc_dir = abspath(join(TOPDIR, 'libsearpc'))
    ccnet_dir = abspath(join(TOPDIR, 'ccnet-server'))

    def _env_add(*a, **kw):
        kw['env'] = env
        return prepend_env_value(*a, **kw)

    _env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ')

    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib'), seperator=' ')

    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib64'), seperator=' ')

    _env_add('PATH', join(PREFIX, 'bin'))
    if on_github_actions():
        _env_add(
            'PYTHONPATH',
            join(os.environ.get('RUNNER_TOOL_CACHE'),
                 'Python/3.6.9/x64/lib/python3.6/site-packages'))
    _env_add('PYTHONPATH', join(PREFIX, 'lib/python3.6/site-packages'))
    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib64', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', libsearpc_dir)
    _env_add('PKG_CONFIG_PATH', ccnet_dir)
    _env_add('LD_LIBRARY_PATH', join(PREFIX, 'lib'))

    # Prepend the seafile-server/python to PYTHONPATH so we don't need to "make
    # install" each time after editing python files.
    _env_add('PYTHONPATH', join(SeafileServer().projectdir, 'python'))

    for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS',
                'PYTHONPATH'):
        info('%s: %s', key, env.get(key, ''))
    return env
Example #48
def tearing_as_in_chemical_engineering():

    # The test problems in the benchmarks module are taken from:
    # http://dx.doi.org/10.4173/mic.1983.3.2   (available free of charge)
    directed_graph = create_testproblem('Problem 7 (opt=3)')
    info(directed_graph)

    # The greedy heuristic runs with lookahead:
    cost, torn_edges = run_mfes_heuristic(directed_graph)

    # The heuristic is generic: The graph can have edge weights, so the cost
    # does not necessarily equal len(torn_edges) in the general case; however,
    # for Problem 7 it does, as this test problem has unit weights.
    print('It is sufficient to remove', cost,
          'edges to make the graph acyclic')

    if has_matplotlib():
        plot_tearing_result(directed_graph, torn_edges)
    else:
        stderr.write('Plotting requires matplotlib.\n')

    if has_gurobi():
        minimum_cost = run_exact_mfes(directed_graph)
        print('It is necessary to remove', minimum_cost, 'edges.')
Example #49
def generate_multi_candidates(layout, candidate_random_trials,
                              num_neighbor_candidates,
                              max_num_neighbor_candidate_attempts,
                              num_chips_to_add, add_scheme):
    utils.info(3, "generating multi candidates")
    num_attempts = 0
    while ((len(candidate_random_trials) < num_neighbor_candidates)
           and (num_attempts < max_num_neighbor_candidate_attempts)):
        num_attempts += 1
        if (add_scheme is not None) and ('cradle' in add_scheme) and (
                utils.argv.num_chips % 3 == 0
        ):  ###TODO: if remaining chips to add not a multiple of 3 call add_multi_chip() instead of add_cradle()

            candidate_list = add_cradle(layout)

            if candidate_list is None:
                continue
            elif len(candidate_list) < 3:
                continue
        else:
            candidate_list = add_multi_chip(
                layout, max_num_neighbor_candidate_attempts, num_chips_to_add)
            if candidate_list == None:
                continue
        #layout.draw_in_3D(None,True)
        #for candidtates in candidate_list:
        #print 'there are that many chips --> ',len(layout.get_chip_positions())
        #	if layout.get_chip_positions()[-1] == candidtates:
        #		print "\n\n\n\n\n\n\nWTF\n\n\n\n\n"
        candidate_random_trials.append(candidate_list)

    if len(candidate_random_trials) != num_neighbor_candidates:
        utils.info(
            0, "Ran out of trials\nOnly " + str(len(candidate_random_trials)) +
            " of " + str(num_neighbor_candidates) + " were found")
    return candidate_random_trials
Example #50
 def start(self, runLogDir):
     x = os.getcwd()
     try:
         os.chdir(self.extract_dir)
         cmd = ['%s/bin/solr' % self.extract_dir, 'start', '-p', self.port]
         if self.host is not None:
             cmd.extend(['-h', self.host])
         if self.memory is not None:
             cmd.extend(['-m', self.memory])
         if self.zk_host is not None:
             cmd.extend(['-c', '-z', self.zk_host])
         if self.server_dir is not None:
             cmd.extend(['-d', self.server_dir])
         if self.solr_home is not None:
             cmd.extend(['-s', self.solr_home])
         if self.example is not None:
             cmd.extend(['-e', self.example])
         if self.jvm_args is not None:
             cmd.append(self.jvm_args)
         utils.info('Running solr with command: %s' % ' '.join(cmd))
         utils.runComand('solr server', cmd,
                         '%s/server%s.log.txt' % (runLogDir, self.name))
     finally:
         os.chdir(x)
Example #51
 def finish_module(self):
     log('Progress.finish_module: self.module_steps=%s, self.module_current=%s'
         % (self.module_steps, self.module_current))
     if not self.module_steps == self.module_current:
         skip = self.module_steps - self.module_current
         self.current += skip
         self.module_current += skip
     percent = self.current * 100 / self.steps
     log('Progress.finish_module: self.current=%s, self.steps=%s' %
         (self.current, self.steps))
     if self.current == self.steps:
         if self.enable:
             log('Progress.finish_module: self.enable=%s' % (self.enable))
             self.bar.update(100, info('name'), 'Done')
             monitor.waitForAbort(1)
             self.bar.close()
Example #52
 def testheartbeat(self):
     self.teststr("+threads\r\n")
     self.teststr("heartbeat_toggle\r\n")
     u.info("HB on?")
     sleep(2)
     self.teststr("+threads\r\n")
     self.teststr("heartbeat_toggle\r\n")
     u.info("HB off?")
     sleep(2)
     self.teststr("+threads\r\n")
     self.teststr("heartbeat_toggle\r\n")
     u.info("HB on?")
     sleep(2)
     self.teststr("+threads\r\n")
     self.teststr("heartbeat_toggle\r\n")
     u.info("HB off?")
     sleep(2)
     self.teststr("+threads\r\n")
Example #53
    def run(self):
        # load config from database
        self.load_config()

        utils.info("starting statscollector module")
        while True:
            # update exchange rates
            rates = utils.get_exchange_rates()
            if rates and len(rates):
                insertquery = 'INSERT INTO forex (btc2usd, btc2eur, btc2gbp, btc2cad, btc2sgd, btc2jpy, btc2inr) VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s")' % (
                    rates["USD"],
                    rates["EUR"],
                    rates["GBP"],
                    rates["CAD"],
                    rates["SGD"],
                    rates["JPY"],
                    rates["INR"],
                )
                deletequery = 'DELETE FROM forex WHERE fid IN (SELECT fid FROM forex LIMIT 1)'
                # add latest values
                utils.populate_db(self.conn, insertquery)
                # delete first row
                utils.populate_db(self.conn, deletequery)
                utils.info("updated forex rates")
            # update btc address balance
            rows = utils.search_db(self.conn,
                                   'SELECT address FROM btcaddresses')
            if rows and len(rows):
                count = 0
                for entry in rows:
                    if self.update_balance(entry[0]):
                        count += 1
                utils.info("updated balance for %d tracked addresses" %
                           (count))

            # load exchange rates from stats table
            query = 'SELECT btc2usd FROM forex ORDER BY fid DESC LIMIT 1'
            rows = utils.search_db(self.conn, query)
            self.config["exchangerates"]["btc2usd"] = rows[0][0]

            # summary of all addresses
            summary = self.address_summary()

            # lowest|highest balance for ransom/donation recipients
            # most common sender/receiver
            # most common sender/receiver for ransom/donation
            # lowest|highest paying|receiving sender/receiver
            # highest balance/txs/rcvd/sent

            ## sleep
            time.sleep(self.config["statscollectiondelay"])

            # reload config from database
            self.load_config()
Example #54
def setup_container():
    """Create and prepare a docker container and let Fabric point at it"""

    from docker.client import DockerClient

    image = 'centos:centos7'
    container_name = 'APP_installation_target'
    cli = DockerClient.from_env(version='auto', timeout=10)

    # Create and start a container using the newly created stage1 image
    cont = cli.containers.run(image=image, name=container_name, remove=False, detach=True, tty=True)
    success("Created container %s from %s" % (container_name, image))

    # Find out container IP, prepare container for APP installation
    try:
        host_ip = cli.api.inspect_container(cont.id)['NetworkSettings']['IPAddress']

        info("Updating and installing OpenSSH server in container")
        cont.exec_run('yum -y update')
        cont.exec_run('yum -y install openssh-server sudo')
        cont.exec_run('yum clean all')

        info('Configuring OpenSSH to allow connections to container')
        add_public_ssh_key(cont)
        cont.exec_run('sed -i "s/#PermitRootLogin yes/PermitRootLogin yes/" /etc/ssh/sshd_config')
        cont.exec_run('sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config')
        cont.exec_run('ssh-keygen -A')
        cont.exec_run('chown root.root /root/.ssh/authorized_keys')
        cont.exec_run('chmod 600 /root/.ssh/authorized_keys')
        cont.exec_run('chmod 700 /root/.ssh')

        info('Starting OpenSSH daemon in container')
        cont.exec_run('/usr/sbin/sshd -D', detach=True)
    except Exception:
        failure("Error while preparing container for APP installation, cleaning up...")
        cont.stop()
        cont.remove()
        raise

    # From now on we connect to root@host_ip using our SSH key
    env.hosts = [host_ip]
    env.user = '******'
    if 'key_filename' not in env and 'key' not in env:
        env.key_filename = os.path.expanduser("~/.ssh/id_rsa")

    # Make sure we can connect via SSH to the newly started container
    # We disable the known hosts check since docker containers created at
    # different times might end up having the same IP assigned to them, and the
    # ssh known hosts check will fail
    with settings(disable_known_hosts=True):
        execute(check_ssh)

    success('Container successfully setup! APP installation will start now')
    return DockerContainerState(cli, cont)
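There is no cleanup counterpart in the snippet; a minimal teardown sketch (the `container` field name of DockerContainerState is an assumption based on the constructor call above):

def teardown_container(state):
    # Stop and remove the container created by setup_container().
    state.container.stop()
    state.container.remove()
    success("Container %s removed" % state.container.name)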
Example #55
def action_api():
    # Reject forged requests before touching the payload
    if not valid_slack_request(request):
        return abort(404)

    data = request.values.to_dict()
    payload = json.loads(data["payload"])

    if "actions" not in payload:
        return "not implemented"

    option = payload["actions"][0]
    action = option["name"]

    if payload["callback_id"] == "configure_password_server":
        if action == "no_reconfigure":
            return info("Password server unchanged.")

        elif action == "no_configure":
            return success(
                "Sure! for more information about the pass command working "
                "check `/pass help` or our web page in "
                "https://slashpass.co"
            )

        team = db.session.query(Team).filter_by(slack_id=payload["team"]["id"]).first()

        if action == "reconfigure_server":
            if not validators.url(option["value"]):
                return error("Invalid URL format, use: https://<domain>")

            if not team.register_server(option["value"]):
                return error("Unable to retrieve the _public_key_ from the server")
            return success("Password server successfully updated!")

        elif action == "use_demo_server":
            if not team.register_server(DEMO_SERVER):
                return error(
                    "An error occurred registering the server, " "please try later."
                )
            return success(
                "The testing server is already configured! remember that "
                "the data on this server can be deleted without prior "
                "notice, when you want to configure your company server "
                "you should only execute the command `/pass configure` along "
                "with the url of your the server."
            )
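The handler leans on `valid_slack_request`; a plausible implementation using Slack's documented v0 request-signing scheme (the `SLACK_SIGNING_SECRET` setting is an assumption, not from the original):

import hashlib
import hmac
import time

def valid_slack_request(request, signing_secret=SLACK_SIGNING_SECRET):
    # Recompute Slack's v0 signature over "v0:<timestamp>:<raw body>"
    # and compare it against the X-Slack-Signature header.
    timestamp = request.headers.get("X-Slack-Request-Timestamp", "")
    signature = request.headers.get("X-Slack-Signature", "")
    try:
        if abs(time.time() - float(timestamp)) > 60 * 5:
            return False  # stale request, possible replay
    except ValueError:
        return False
    basestring = ("v0:%s:" % timestamp).encode() + request.get_data()
    expected = "v0=" + hmac.new(
        signing_secret.encode(), basestring, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)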
Example #56
def PrintCellSizeEvaluation(cellSize, varianceLocation, varianceMobility):
    sigmaLocation = math.sqrt(varianceLocation)
    sigmaMobility = math.sqrt(varianceMobility)
    utils.info("  Calculated cell size %.2f" % cellSize)
    upperMinimum = min(sigmaLocation*2, sigmaMobility/2)
    lowerMinimum = min(sigmaLocation/2.5, sigmaMobility/10)
    #print "  To capture detail of location variance, cell size should be between", sigmaLocation/10, sigmaLocation
    #print "  To capture detail of mobility variance, cell size should be between", sigmaMobility/10, sigmaMobility
    utils.info("  Prefered cell size is between %.2f and %.2f"%(lowerMinimum,upperMinimum))
    if cellSize > upperMinimum:
        n = int(cellSize/upperMinimum)
        n = n*n
        utils.info("  Processing will take %d times as long if cell size is %.2f"%(n,upperMinimum))
Example #57
def produce_outputs(self):
    # Concatenate the instance matrices of all numeric inputs column-wise.
    data = [x.data for x in self.input_dps]
    data_classes = [get_data_class(x.data) for x in self.input_dps]
    if not all(issubclass(x, Numeric) for x in data_classes):
        error(
            f"{self.name} requires numeric-only input data, but was fed: {data_classes}"
        )
    insts = [x.instances for x in data]
    for d in self.input_dps:
        info(f"{d} : {d.data.instances.shape}")
    info(f"Manipulating {self.name} inputs: {shapes_list(insts)}")
    self.outputs = np.concatenate(insts, axis=1)
    info(f"Produced {self.name} outputs: {self.outputs.shape}")
Example #58
def installworld(destdir, worldlog, distriblog):
    info('Installing world in {0}', destdir)
    info('Log file: {0}', worldlog)
    sh("env MAKEOBJDIRPREFIX=${OBJDIR}",
       "make",
       "-C ${TRUEOS_ROOT}",
       "installworld",
       "DESTDIR=${destdir}",
       "__MAKE_CONF=${makeconfbuild}",
       log=worldlog)

    info('Creating distribution in {0}', destdir)
    info('Log file: {0}', distriblog)
    sh("env MAKEOBJDIRPREFIX=${OBJDIR}",
       "make",
       "-C ${TRUEOS_ROOT}",
       "distribution",
       "DESTDIR=${destdir}",
       "__MAKE_CONF=${makeconfbuild}",
       log=distriblog)
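`sh` is this build system's shell-runner helper; a rough stand-in matching the call sites above (joining the fragments, letting the shell expand ${OBJDIR}-style variables from the environment, and sending output to the log file) might look like this sketch, though the real helper may differ:

import subprocess

def sh(*parts, log=None):
    # Join the command fragments and run them through the shell so that
    # ${VAR} references are expanded from the environment.
    cmd = " ".join(parts)
    with open(log, "a") as logf:
        subprocess.check_call(cmd, shell=True, stdout=logf, stderr=logf)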
Example #59
def optimize_layout_linear_random_greedy():
    # Create an initial layout
    layout = Layout(utils.argv.chip, [[1, 0.0, 0.0]], utils.argv.medium,
                    utils.argv.overlap, [])

    max_num_random_trials = 5  # TODO: Don't hardcode this
    while (layout.get_num_chips() != utils.argv.num_chips):
        utils.info(
            1, "* Generating " + str(max_num_random_trials) +
            " candidate positions for chip #" +
            str(1 + layout.get_num_chips()) + " in the layout")
        candidate_random_trials = []
        while (len(candidate_random_trials) < max_num_random_trials):
            [picked_level, picked_x,
             picked_y] = layout.get_random_feasible_neighbor_position(-1)
            candidate_random_trials.append([picked_level, picked_x, picked_y])

        # Pick a candidate
        max_power = -1
        picked_candidate = None
        for candidate in candidate_random_trials:

            layout.add_new_chip(candidate)
            # print layout.get_chip_positions()
            utils.info(1, "- Evaluating candidate " + str(candidate))
            result = find_maximum_power_budget(layout)
            if result is not None:
                [power_distribution, temperature] = result
                if (temperature <= utils.argv.max_allowed_temperature) and (
                        sum(power_distribution) > max_power):
                    # track the best (highest total power) feasible candidate
                    max_power = sum(power_distribution)
                    picked_candidate = candidate
            try:
                layout.remove_chip(layout.get_num_chips() - 1)
            except Exception:
                utils.abort(
                    "Fatal error: Graph shouldn't be disconnected here!!")

        # Add the best candidate found in this round
        utils.info(1, "Picked candidate: " + str(picked_candidate))
        layout.add_new_chip(picked_candidate)

    # Do the final evaluation (already computed during the search, but cheap to redo)
    result = find_maximum_power_budget(layout)
    if result is None:
        return None
        return None

    [power_distribution, temperature] = result

    return [layout, power_distribution, temperature]
Example #60
File: train.py Project: yxd886/fuse
def compare(pred, real):
    sort_pred = sorted(pred)
    sort_real = sorted(real)
    pred_index = [sort_pred.index(item) for item in pred]
    real_index = [sort_real.index(item) for item in real]
    info("real rank index:", real_index)
    info("pred rank index:", pred_index)
    for i in range(len(pred)):
        for j in range(len(pred)):
            ratio = real[i] / real[j]
            if ratio > compare_ratio and pred[i] <= pred[j]:
                info("wrong compare for index {} and index {}".format(i, j))
                return False
    return True
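A quick usage sketch (`compare_ratio` is a module-level threshold in the original; the value 1.05 here is an assumption):

compare_ratio = 1.05

# real[1]/real[2] = 5/3 > 1.05 and pred[1] > pred[2], so every pair that
# matters keeps its ordering and compare() returns True.
print(compare(pred=[0.1, 0.5, 0.4], real=[1.0, 5.0, 3.0]))  # True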