Example #1
def train(model_name):
    # Get the labels from the .env file
    labels = env('LABELS')
    size = int(env('IMG_SIZE'))
    #create the train data (format the training images)
    train_data = create_train_data()

    tf.reset_default_graph()

    convnet = network(size, labels)

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    #if the model already exists, load it so we are not training from scratch
    if os.path.exists('{}.meta'.format(model_name)):
        model.load(model_name)
        print('model loaded!')

    X = np.array([i[0] for i in train_data]).reshape(-1, size, size, 1)
    Y = [i[1] for i in train_data]

    model.fit(X, Y, n_epoch=50)

    #save the model in the models folder
    model.save('../models/' + model_name)
    print("here")
    print(type(model))
    print("here")
    return model
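Both this snippet and the test snippets below assume a project-local network(size, labels) builder. A minimal sketch using stock tflearn layers follows; the architecture is a guess, not the project's actual model, and it assumes env('LABELS') yields a list of label names:

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

def network(size, labels):
    # Hypothetical sketch: a small convnet whose input matches the
    # reshape(-1, size, size, 1) above and whose output width is len(labels).
    net = input_data(shape=[None, size, size, 1], name='input')
    net = conv_2d(net, 32, 5, activation='relu')
    net = max_pool_2d(net, 5)
    net = conv_2d(net, 64, 5, activation='relu')
    net = max_pool_2d(net, 5)
    net = fully_connected(net, 1024, activation='relu')
    net = dropout(net, 0.8)
    net = fully_connected(net, len(labels), activation='softmax')
    return regression(net, optimizer='adam', learning_rate=1e-3,
                      loss='categorical_crossentropy', name='targets')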
Example #2
def test(model_name):
    labels = env('LABELS')
    size = int(env('IMG_SIZE'))

    #format the images that are uploaded
    test_upload = process_test_data(env('TEST_UPLOAD'))

    convnet = network(size, labels)
    model = tflearn.DNN(convnet, tensorboard_dir='log')

    #load the model for testing
    model.load('models/' + model_name)

    for num, data in enumerate(test_upload):
        img_num = data[1]
        img_data = data[0]

        orig = img_data
        data = img_data.reshape(size, size, 1)
        model_out = model.predict([data])[0]

        print(model_out)
        print(np.argmax(model_out))

        str_label = labels[np.argmax(model_out)]

        #output as a text
        print(str_label)
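test() above (and test_debug() in a later example) relies on a project-local process_test_data(directory) helper. A plausible sketch, where the file layout, the import path of env(), and the id parsing are all assumptions:

import os
import cv2
import numpy as np
from utils import env  # assumed import path for the env() helper used above

def process_test_data(directory):
    # Hypothetical sketch: each entry is [image_array, image_id], matching
    # the data[0]/data[1] access in test() above.
    size = int(env('IMG_SIZE'))
    test_data = []
    for name in sorted(os.listdir(directory)):
        if name.startswith('.'):
            continue  # skip dot files, as create_train_data() does
        img = cv2.imread(os.path.join(directory, name), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (size, size))
        test_data.append([np.array(img), name.split('.')[0]])
    return test_data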
Example #3
File: shell.py Project: xinni-ge/eclcli
    def get_base_parser(self):
        parser = argparse.ArgumentParser(
            prog='ceilometer',
            description=__doc__.strip(),
            epilog='See "ceilometer help COMMAND" '
                   'for help on a specific command.',
            add_help=False,
            formatter_class=HelpFormatter,
        )

        # Global arguments
        parser.add_argument('-h', '--help',
                            action='store_true',
                            help=argparse.SUPPRESS,
                            )

        parser.add_argument('--version',
                            action='version',
                            version=__init__.__version__)

        parser.add_argument('-d', '--debug',
                            default=bool(utils.env('MONITORINGCLIENT_DEBUG')
                                         ),
                            action='store_true',
                            help='Defaults to env[MONITORINGCLIENT_DEBUG].')

        parser.add_argument('-v', '--verbose',
                            default=False, action="store_true",
                            help="Print more verbose output.")

        parser.add_argument('--timeout',
                            default=600,
                            type=_positive_non_zero_int,
                            help='Number of seconds to wait for a response.')

        parser.add_argument('--ceilometer-url', metavar='<CEILOMETER_URL>',
                            dest='os_endpoint',
                            default=utils.env('CEILOMETER_URL'),
                            help=("DEPRECATED, use --os-endpoint instead. "
                                  "Defaults to env[CEILOMETER_URL]."))

        parser.add_argument('--ceilometer_url',
                            dest='os_endpoint',
                            help=argparse.SUPPRESS)

        parser.add_argument('--ceilometer-api-version',
                            default=utils.env(
                                'CEILOMETER_API_VERSION', default='2'),
                            help='Defaults to env[CEILOMETER_API_VERSION] '
                            'or 2.')

        parser.add_argument('--ceilometer_api_version',
                            help=argparse.SUPPRESS)

        self.auth_plugin.add_opts(parser)
        self.auth_plugin.add_common_opts(parser)

        return parser
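The --timeout option above uses a project-local _positive_non_zero_int validator. A plausible sketch, with the behavior inferred from the name rather than taken from the project:

import argparse

def _positive_non_zero_int(argument_value):
    # Inferred sketch: an argparse `type=` callable that accepts only
    # integers strictly greater than zero.
    value = int(argument_value)
    if value <= 0:
        raise argparse.ArgumentTypeError(
            '%s must be a positive non-zero integer' % argument_value)
    return value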
Example #4
def stage_upgrade():
    sh('rm -rf ${UPGRADE_STAGEDIR}')
    sh('mkdir -p ${UPGRADE_STAGEDIR}')
    sh('cp -R ${OBJDIR}/packages/Packages ${UPGRADE_STAGEDIR}/')
    # If RESTART is given, save that
    if env('RESTART'):
        sh('echo ${RESTART} > ${UPGRADE_STAGEDIR}/RESTART')

    # And if REBOOT is given, put that in FORCEREBOOT
    if env('REBOOT'):
        sh('echo ${REBOOT} > ${UPGRADE_STAGEDIR}/FORCEREBOOT')
    sh('rm -f ${BE_ROOT}/release/LATEST')
    sh('ln -sf ${UPGRADE_STAGEDIR} ${BE_ROOT}/release/LATEST')
Example #5
def stage_upgrade():
    sh('rm -rf ${UPGRADE_STAGEDIR}')
    sh('mkdir -p ${UPGRADE_STAGEDIR}')
    sh('cp -R ${OBJDIR}/packages/Packages ${UPGRADE_STAGEDIR}/')
    # If RESTART is given, save that
    if env('RESTART'):
        sh('echo ${RESTART} > ${UPGRADE_STAGEDIR}/RESTART')

    # And if REBOOT is given, put that in FORCEREBOOT
    if env('REBOOT'):
        sh('echo ${REBOOT} > ${UPGRADE_STAGEDIR}/FORCEREBOOT')
    sh('rm -f ${BE_ROOT}/release/LATEST')
    sh('ln -sf ${UPGRADE_STAGEDIR} ${BE_ROOT}/release/LATEST')
Example #6
def stage_upgrade():
    sh('rm -rf ${UPGRADE_STAGEDIR}')
    sh('mkdir -p ${UPGRADE_STAGEDIR}')
    sh('cp -R ${OBJDIR}/packages/Packages ${UPGRADE_STAGEDIR}/')

    # Move any validation scripts back
    for v in "ValidateInstall", "ValidateUpdate":
        if os.path.exists(e('${UPGRADE_STAGEDIR}/Packages/${v}')):
            sh(e("mv ${UPGRADE_STAGEDIR}/Packages/${v} ${UPGRADE_STAGEDIR}/${v}"))
            
    # If RESTART is given, save that
    if env('RESTART'):
        sh('echo ${RESTART} > ${UPGRADE_STAGEDIR}/RESTART')

    # And if REBOOT is given, put that in FORCEREBOOT
    if env('REBOOT'):
        sh('echo ${REBOOT} > ${UPGRADE_STAGEDIR}/FORCEREBOOT')
    sh('rm -f ${BE_ROOT}/release/LATEST')
    sh('ln -sf ${UPGRADE_STAGEDIR} ${BE_ROOT}/release/LATEST')
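These build snippets all go through project-local sh() and e() helpers that expand ${VAR} references before running. A minimal sketch, assuming plain environment-variable expansion; the real helpers in these build scripts likely also resolve variables from the build profile:

import os
import subprocess

def e(template):
    # Sketch only: expand ${VAR} references from the process environment.
    return os.path.expandvars(template)

def sh(command):
    # Sketch only: expand, run through the shell, and fail loudly on error.
    subprocess.check_call(e(command), shell=True)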
Example #7
def test_debug(model_name):
    # Only import when called
    import matplotlib.pyplot as plt

    labels = env('LABELS')
    size = int(env('IMG_SIZE'))
    # if you need to create the data:
    test_data = process_test_data(env('TEST_DIR'))
    # if you already have some saved:
    # test_data = np.load('test_data.npy')

    convnet = network(size, labels)
    model = tflearn.DNN(convnet, tensorboard_dir='log')

    #load the model for testing
    model.load('../models/' + model_name)

    fig = plt.figure()

    #plot the test images along with the label as identified by the model
    for num, data in enumerate(test_data):
        img_num = data[1]
        img_data = data[0]

        y = fig.add_subplot(2, 2, num + 1)
        orig = img_data
        data = img_data.reshape(size, size, 1)
        model_out = model.predict([data])[0]

        print(model_out)
        print(np.argmax(model_out))

        str_label = labels[np.argmax(model_out)]

        y.imshow(orig, cmap='gray')
        plt.title(str_label)
        y.axes.get_xaxis().set_visible(False)
        y.axes.get_yaxis().set_visible(False)

    plt.show()
Example #8
def stage_upgrade():
    sh('rm -rf ${UPGRADE_STAGEDIR}')
    sh('mkdir -p ${UPGRADE_STAGEDIR}')
    sh('cp -R ${OBJDIR}/packages/Packages ${UPGRADE_STAGEDIR}/')

    # If an update validation script is given, copy that
    if os.path.exists(e('${PROFILE_ROOT}/ValidateUpdate')):
        sh('cp ${PROFILE_ROOT}/ValidateUpdate ${UPGRADE_STAGEDIR}/ValidateUpdate')
    if os.path.exists(e('${PROFILE_ROOT}/ValidateInstall')):
        sh('cp ${PROFILE_ROOT}/ValidateInstall ${UPGRADE_STAGEDIR}/ValidateInstall')

    # Allow the environment to override it; /dev/null or an empty string
    # means there is none
    if env('VALIDATE_UPDATE') is not None:
        if env('VALIDATE_UPDATE') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_UPDATE} ${UPGRADE_STAGEDIR}/ValidateUpdate')
        else:
            sh('rm -f ${UPGRADE_STAGEDIR}/ValidateUpdate')
    if env('VALIDATE_INSTALL') is not None:
        if env('VALIDATE_INSTALL') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_INSTALL} ${UPGRADE_STAGEDIR}/ValidateInstall')
        else:
            sh('rm -f ${UPGRADE_STAGEDIR}/ValidateInstall')

    # If RESTART is given, save that
    if env('RESTART'):
        sh('echo ${RESTART} > ${UPGRADE_STAGEDIR}/RESTART')

    # And if REBOOT is given, put that in FORCEREBOOT
    if env('REBOOT'):
        sh('echo ${REBOOT} > ${UPGRADE_STAGEDIR}/FORCEREBOOT')
    sh('rm -f ${BE_ROOT}/release/LATEST')
    sh('ln -sf ${UPGRADE_STAGEDIR} ${BE_ROOT}/release/LATEST')
Example #9
def stage_upgrade():
    sh('rm -rf ${UPGRADE_STAGEDIR}')
    sh('mkdir -p ${UPGRADE_STAGEDIR}')
    sh('cp -R ${OBJDIR}/packages/Packages ${UPGRADE_STAGEDIR}/')

    # If an update validation script is given, copy that
    if os.path.exists(e('${PROFILE_ROOT}/ValidateUpdate')):
        sh('cp ${PROFILE_ROOT}/ValidateUpdate ${UPGRADE_STAGEDIR}/ValidateUpdate')
    if os.path.exists(e('${PROFILE_ROOT}/ValidateInstall')):
        sh('cp ${PROFILE_ROOT}/ValidateInstall ${UPGRADE_STAGEDIR}/ValidateInstall')

    # Allow the environment to override it; /dev/null or an empty string
    # means there is none
    if env('VALIDATE_UPDATE') is not None:
        if env('VALIDATE_UPDATE') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_UPDATE} ${UPGRADE_STAGEDIR}/ValidateUpdate')
        else:
            sh('rm -f ${UPGRADE_STAGEDIR}/ValidateUpdate')
    if env('VALIDATE_INSTALL') is not None:
        if env('VALIDATE_INSTALL') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_INSTALL} ${UPGRADE_STAGEDIR}/ValidateInstall')
        else:
            sh('rm -f ${UPGRADE_STAGEDIR}/ValidateInstall')

    # If RESTART is given, save that
    if env('RESTART'):
        sh('echo ${RESTART} > ${UPGRADE_STAGEDIR}/RESTART')

    # And if REBOOT is given, put that in FORCEREBOOT
    if env('REBOOT'):
        sh('echo ${REBOOT} > ${UPGRADE_STAGEDIR}/FORCEREBOOT')
    sh('rm -f ${BE_ROOT}/release/LATEST')
    sh('ln -sf ${UPGRADE_STAGEDIR} ${BE_ROOT}/release/LATEST')
Example #10
def main():
    """configure and start crawler"""
    settings = dict(
        COMPOSE_EMAIL=utils.env('COMPOSE_EMAIL'),
        COMPOSE_ACCOUNT_NAME=utils.env('COMPOSE_ACCOUNT_NAME'),
        COMPOSE_PASSWORD=utils.env('COMPOSE_PASSWORD'),
        COMPOSE_CLUSTER_ID=utils.env('COMPOSE_CLUSTER_ID'),
        DATADOG_API_KEY=utils.env('DATADOG_API_KEY'),
        DATADOG_APP_KEY=utils.env('DATADOG_APP_KEY'),
        DATADOG_TAGS=utils.env('DATADOG_TAGS', ''),
        USER_AGENT='Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        ROBOTSTXT_OBEY=True,
        LOG_LEVEL=utils.env('LOG_LEVEL', 'WARNING'),
        CHECK_INTERVAL_SECONDS=int(utils.env('CHECK_INTERVAL_SECONDS', 600)),
    )
    configure_logging(settings)
    loop = task.LoopingCall(crawl, settings=settings)
    loop.start(settings.get('CHECK_INTERVAL_SECONDS'))
    reactor.run()
Example #11
def copy_validators():
    # If an update validation script is given, copy that
    if os.path.exists(e('${PROFILE_ROOT}/ValidateUpdate')):
        sh('cp ${PROFILE_ROOT}/ValidateUpdate ${pkgdir}/Packages/ValidateUpdate')
    if os.path.exists(e('${PROFILE_ROOT}/ValidateInstall')):
        sh('cp ${PROFILE_ROOT}/ValidateInstall ${pkgdir}/Packages/ValidateInstall')
        
    # Allow the environment to override it; /dev/null or an empty string
    # means there is none
    if env('VALIDATE_UPDATE') is not None:
        if env('VALIDATE_UPDATE') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_UPDATE} ${pkgdir}/Packages/ValidateUpdate')
        else:
            sh('rm -f ${pkgdir}/Packages/ValidateUpdate')
    if env('VALIDATE_INSTALL') is not None:
        if env('VALIDATE_INSTALL') not in ("/dev/null", ""):
            sh('cp ${VALIDATE_INSTALL} ${pkgdir}/Packages/ValidateInstall')
        else:
            sh('rm -f ${pkgdir}/Packages/ValidateInstall')

    for p in "ValidateUpdate", "ValidateInstall":
        if os.path.exists(os.path.join(e('${pkgdir}'), "Packages", p)):
            validators.append(e('-V ${pkgdir}/Packages/' + p))
Example #12
def create_train_data():
    #get the images from the training directory
    directory = env('TRAIN_DIR')

    #format the images into numpy array
    training_data = []
    for img in os.listdir(directory):
        # ignore dot files
        if img.startswith('.'):
            continue
        label = label_img(img)

        path = os.path.join(directory, img)
        img = format_image(path)

        training_data.append([np.array(img), np.array(label)])

    return np.array(training_data)
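create_train_data() leans on two more project-local helpers, label_img and format_image. Plausible sketches under the same assumptions as above (cv2 loading, labels matched by file-name prefix, env() imported from utils):

import cv2
from utils import env  # assumed import path

def label_img(name):
    # Inferred sketch: one-hot encode by file-name prefix, e.g.
    # "cat.42.png" -> [1, 0] when LABELS is ['cat', 'dog'].
    word = name.split('.')[0]
    return [1 if word == label else 0 for label in env('LABELS')]

def format_image(path):
    # Inferred sketch: grayscale-load and resize to IMG_SIZE, matching the
    # reshape(-1, size, size, 1) in train().
    size = int(env('IMG_SIZE'))
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return cv2.resize(img, (size, size))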
Example #13
import utils, requests

ISLAND_URL = utils.env("ISLAND_URL", "http://localhost:8000")


def generate_file(map_gen_id, file_map_id):
    resp = requests.post("%s/mapa/generate/%s/file/%s" %
                         (ISLAND_URL, map_gen_id, file_map_id))
    if resp.status_code == 200:
        return resp.json()['id']
    raise Exception("Generation failed: [%s] - %s" %
                    (resp.status_code, resp.text))
Example #14
import utils, json, hvac, logging

S_VAULT_HOST = utils.env("S_VAULT_HOST", 'localhost')
S_VAULT_SCHEMA = utils.env("S_VAULT_SCHEMA", 'http')
S_VAULT_PORT = int(utils.env("S_VAULT_PORT", 8200))
S_VAULT_TOKEN = utils.env("S_VAULT_TOKEN")

vault_url = "%s://%s:%i" % (S_VAULT_SCHEMA, S_VAULT_HOST, S_VAULT_PORT)
client = hvac.Client(url=vault_url, token=S_VAULT_TOKEN)


def put(key, value):
    logging.info("vault_put: [%s] => [%s]" % (key, value))
    secret = json.loads(value) if isinstance(value, str) else value
    client.secrets.kv.v2.create_or_update_secret(path=key, secret=secret)
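For symmetry, a hypothetical read counterpart, not part of the original module, using hvac's KV v2 API:

def get(key):
    # Hypothetical counterpart: KV v2 nests the payload under data.data.
    resp = client.secrets.kv.v2.read_secret_version(path=key)
    return resp['data']['data']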
Example #15
import uuid
import utils
import logging
import pystache

from contextlib import contextmanager
import pyrqlite.dbapi2 as dbapi2

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

RQLITE_SERVER = utils.env('RQLITE_SERVER', 'localhost')
RQLITE_PORT = utils.env('RQLITE_PORT', 4001, int)


def get_connection(conn=None):

    if conn is not None:
        return conn

    # Connect to the database
    _conn = dbapi2.connect(
        host=RQLITE_SERVER,
        port=RQLITE_PORT,
    )
    logger.debug("connection opened")
    return _conn


@contextmanager
def get_closable_connection():
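    # Plausible completion (assumption, the page truncates the snippet here):
    # yield a fresh connection and always close it afterwards.
    conn = get_connection()
    try:
        yield conn
    finally:
        conn.close()
        logger.debug("connection closed")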
Example #16
#! /usr/bin/env python
# encoding:utf8
# vim: set ts=4 et sw=4 sts=4

"""
File: statistic_words_freqency.py
Author: "shanzi"
Email: "*****@*****.**"
Created At: 2013-05-30 22:31
Description: basic statistics of English word frequency on tech-related websites
"""

from utils import env
env()

import re

import pymongo
from pymongo import MongoClient
from Stemmer import Stemmer
from searcher.ignorewords import EN as EN_IGNORE
from clint.textui import progress, colored


EN_WORD_CUT = re.compile(r"[^a-zA-Z]*")
client = MongoClient()
db = client.fetch_data

def run():
    stemmer = Stemmer("english")
    pages = db.en.find()
    print colored.yellow("statistic words") 
Example #17
import rabbitmq, utils
import json, yaml, logging

SPRING_PROFILE = utils.env("SPRING_PROFILE", "")

# json with prefix/sufix by flavor
PREFIX_BAG = utils.env(
    "PREFIX_BAG", '''{ "spring": { "prefix": "spring/", "sufix": "/data"} }''')
_prefix_bag = json.loads(PREFIX_BAG)
logging.info("prefix_bag=%s" % str(_prefix_bag))

# json with output format by storage (you can have a .yaml that seeds consul as json)
STORAGE_OUTPUT = utils.env("STORAGE_OUTPUT",
                           '''{ "consul": "yaml", "vault": "json" }''')
_storage_output = json.loads(STORAGE_OUTPUT)
logging.info("storage_outputs=%s", str(_storage_output))

exchange = utils.env('EXCHANGE', 'seeder')
queue_name = utils.env('QUEUE', 'seeder-transform')
input_routing_key = utils.env('INPUT_RK', 'transform')
output_routing_key = utils.env('OUTPUT_RK', 'seed')

conn = rabbitmq.get_connection()
rabbitmq.create_exchange(exchange, conn)
rabbitmq.create_queue(queue_name, exchange, input_routing_key, conn)


def load_custom_transformer():
    tb, ta = None, None

    try:
Example #18
@app.route("/log", methods=['POST'])
async def log(request):
    logger.info("received => [%s]" % request.body)
    return text('OK')


@app.route("/rabbitmq/<exchange>/<routing_key>", methods=['POST'])
async def log(request, exchange, routing_key):
    rabbitmq.publish(exchange, routing_key, request.body)
    return text('OK')


@app.route("/health")
async def health(request):
    return text('OK')


if __name__ == "__main__":

    import utils

    APP_PORT = int(utils.env("APP_PORT", 8000))
    APP_WORKERS = int(utils.env("APP_WORKERS", 1))

    exchange = utils.env('EXCHANGE')
    if exchange:
        rabbitmq.create_exchange(exchange)

    app.run(host="0.0.0.0", port=APP_PORT, workers=APP_WORKERS)
Example #19
    if poudriere_proc and poudriere_proc.pid:
        try:
            os.kill(poudriere_proc.pid, signal.SIGTERM)
        except OSError:
            pass


def cleanup_gui():
    # FIXME: This triggers a kernel panic, possibly because of long path
    # names with spaces and double dashes in them.
    info('Cleaning up gui modules')
    sh('rm -rf ${BE_ROOT}/gui/node_modules')


if __name__ == '__main__':
    if env('SKIP_PORTS'):
        info('Skipping ports build as instructed by setting SKIP_PORTS')
        sys.exit(0)

    cleanup_gui()
    create_overlay()
    on_abort(cleanup_env)
    obtain_jail_name()
    calculate_make_jobs()
    create_poudriere_config()
    create_make_conf()
    create_ports_list()
    prepare_jail()
    merge_port_trees()
    prepare_env()
    signal.signal(signal.SIGTERM, terminate)
Example #20

@app.route("/mapa/<id>/templars", methods=['GET'])
async def get_mapa(request, id):
    return json(mapa.get_templars(id))


@app.route("/mapa/<id>", methods=['DELETE'])
async def delete(request, id):
    mapa.delete(id)
    return text('OK')


@app.route("/mapa/<id>/generate", methods=['POST'])
async def generate(request, id):  # renamed; shadowed delete() above
    mapa.start_map_generation(id, request.json)
    return text('OK')


@app.route("/mapa/generate/<map_gen_id>/file/<file_map_id>", methods=['POST'])
async def generate_file(request, map_gen_id, file_map_id):  # renamed; shadowed delete()
    return json({'id': mapa.generate_file(map_gen_id, file_map_id)})


if __name__ == "__main__":

    APP_PORT = utils.env("APP_PORT", 8000, int)
    APP_WORKERS = utils.env("APP_WORKERS", 2, int)

    app.run(host="0.0.0.0", port=APP_PORT, workers=APP_WORKERS)
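Across these examples utils.env appears in two shapes: env(NAME, default) and env(NAME, default, type), as used just above. A minimal sketch that satisfies both; this is an assumption about the real helper, which may also read a .env file:

import os

def env(name, default=None, cast=None):
    # Read NAME from the process environment, fall back to `default`, and
    # optionally coerce with `cast` (e.g. int), matching calls like
    # utils.env("APP_PORT", 8000, int) above.
    value = os.environ.get(name, default)
    if cast is not None and value is not None:
        value = cast(value)
    return value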
Example #21
import json
import utils
import requests

TEMPLAR_URL = utils.env("TEMPLAR_URL", "http://localhost:8001")


def compile(id, config):
    resp = requests.post("%s/%s/compile" % (TEMPLAR_URL, id),
                         data=json.dumps(config))
    if resp.status_code == 200:
        return resp.text
    raise Exception("Failed to compile template '%s. %s - %s" %
                    (id, resp.status_code, resp.text))
Example #22
import utils
import consul, logging

S_CONSUL_HOST = utils.env("S_CONSUL_HOST", "localhost")
S_CONSUL_PORT = int(utils.env("S_CONSUL_PORT", 8500))

c = consul.Consul(host=S_CONSUL_HOST, port=S_CONSUL_PORT, scheme='http')

def put(id, value):
    logging.info("consul_put: [%s] => [%s]" % (id, value))
    c.kv.put(id, value)
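A hypothetical read counterpart, not in the original module, using python-consul's KV API:

def get(id):
    # kv.get returns (index, item); item['Value'] holds the raw bytes,
    # or item is None when the key is absent.
    index, item = c.kv.get(id)
    return item['Value'].decode() if item else None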
Example #23
import json, logging
import rabbitmq, utils
import consul_client, vault_client

exchange = utils.env('EXCHANGE', 'seeder')
queue_name = utils.env('QUEUE', 'seed')
input_routing_key = utils.env('INPUT_RK', 'seed')
output_routing_key = utils.env('OUTPUT_RK', 'seeded')

conn = rabbitmq.get_connection()
rabbitmq.create_exchange(exchange, conn)
rabbitmq.create_queue(queue_name, exchange, input_routing_key, conn)


def notify_seed_app(app):
    rabbitmq.publish(exchange, output_routing_key, {'app': app}, conn)


def seed(message):

    key = message['key']
    value = message['value']
    storage = message['storage']
    app = message['app']

    if storage == 'consul':
        consul_client.put(key, value)
        notify_seed_app(app)

    elif storage == 'vault':
        vault_client.put(key, value)
Example #24
        except Exception:
            raise ValueError('YAML value is expected, but got: %s' % value)
        return value

    def __repr__(self):
        return "YAML data"


def generate_output_name():
    file_name = "shaker_%s.json" % utils.strict(str(datetime.datetime.now()))
    return os.path.join(tempfile.gettempdir(), file_name)


COMMON_OPTS = [
    cfg.Opt('server-endpoint',
            default=utils.env('SHAKER_SERVER_ENDPOINT'),
            required=True,
            type=Endpoint(),
            help='Address for server connections (host:port), '
            'defaults to env[SHAKER_SERVER_ENDPOINT].'),
    cfg.IntOpt('polling-interval',
               default=(utils.env('SHAKER_POLLING_INTERVAL')
                        or DEFAULT_POLLING_INTERVAL),
               help='How frequently the agent polls server, in seconds')
]

OPENSTACK_OPTS = [
    cfg.StrOpt('os-auth-url',
               metavar='<auth-url>',
               default=utils.env('OS_AUTH_URL'),
               sample_default='',
Example #25
import rabbitmq, utils
import json, logging, requests

# load configs
exchange = utils.env('EXCHANGE', 'rabbitmqp')
output_exchange = utils.env('OUTPUT_EXCHANGE')

queue = utils.env('QUEUE', 'my-queue')
input_rk = utils.env('INPUT_RK', 'input')
output_rk = utils.env('OUTPUT_RK')
post_url = utils.env('POST_URL')

# get rabbit connection
conn = rabbitmq.get_connection()

# create exchanges
rabbitmq.create_exchange(exchange, conn)
if output_exchange and output_exchange != exchange:
    rabbitmq.create_exchange(output_exchange, conn)

# create queues and bindings
rabbitmq.create_queue(queue, exchange, input_rk, conn)

# message consumer callback
def callback(ch, method, properties, body):

    # call backend
    if post_url is not None:
        logging.info("Posting message to url %s" % post_url)

        resp = requests.post(post_url, data=body)
Example #26
import os
import utils
import requests

API_URL = utils.env("API_URL", "http://localhost:8000")

def get_file(id):
    return requests.get("%s/storage/file/%s" % (API_URL, id))

def delete_file(id):
    return requests.delete("%s/storage/file/%s" % (API_URL, id))

def k8s_bounce_app(app):
    return requests.get("%s/k8s/bounce/%s" % (API_URL, app))
Example #27
from sanic import Sanic
from sanic.log import logger
from sanic.response import json
from sanic.response import text

import os
import fstore
import rabbitmq, utils

exchange = utils.env('EXCHANGE', 'seeder')
output_routing_key = utils.env('OUTPUT_RK', 'split')
KUBE_BOUNCE_COMMAND = utils.env(
    'KUBE_BOUNCE_COMMAND',
    '''kubectl set env deployment/%s --env="LAST_MANUAL_RESTART=$(date +%%s)"'''
)

rabbitmq.create_exchange(exchange)
app = Sanic()


@app.route("/seed/<store>/<flavor>", methods=['POST'])
async def seed(request, store, flavor):
    id = fstore.save(request.files["file"][0].body)
    extension = request.files["file"][0].name.split('.')[-1]
    name = request.files["file"][0].name.split('.')[0]
    message = {
        'id': id,
        'storage': store,
        'name': name,
        'type': extension if extension not in ['yaml', 'yml'] else 'yaml',
        'flavor': flavor
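    }
    # Plausible continuation (assumption): the page truncates the snippet
    # here; the sibling examples publish such messages onward, e.g.
    # rabbitmq.publish(exchange, output_routing_key, message).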
Example #28
    node_modules = e('${GUI_STAGEDIR}/node_modules')

    os.chdir(e('${GUI_STAGEDIR}'))
    sh('npm install')
    sh('${node_modules}/.bin/gulp deploy --output=${GUI_DESTDIR}',
       log=logfile,
       mode='a')


def create_plist():
    with open(e('${GUI_DESTDIR}/gui-plist'), 'w') as f:
        for i in walk('${GUI_DESTDIR}'):
            if not os.path.isdir(e('${GUI_DESTDIR}/${i}')):
                f.write(e('/usr/local/www/gui/${i}\n'))

        with open(e('${GUI_STAGEDIR}/custom-plist')) as c:
            f.write(c.read())


if __name__ == '__main__':
    if env('SKIP_GUI'):
        info('Skipping GUI build as instructed by setting SKIP_GUI')
        sys.exit(0)

    info('Building GUI')
    info('Log file: {0}', logfile)
    cleandirs()
    copy()
    install()
    create_plist()
Example #29
File: build-os.py Project: zfstor/build
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################

import os
import sys
from dsl import load_profile_config
from utils import sh, sh_str, env, e, objdir, info, debug, pathjoin

config = load_profile_config()
arch = env('TARGET_ARCH', 'amd64')
makeconfbuild = objdir('make-build.conf')
kernconf = objdir(e('${KERNCONF}'))
kernconf_debug = objdir(e('${KERNCONF}-DEBUG'))
kernlog = objdir('logs/buildkernel')
kerndebuglog = objdir('logs/buildkernel-debug')
worldlog = objdir('logs/buildworld')
makejobs = None


def calculate_make_jobs():
    global makejobs

    jobs = sh_str('sysctl -n kern.smp.cpus')
    if not jobs:
        makejobs = 2
예제 #30
0
import logging, json
import utils, rabbitmq, island

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# rabbit config
exchange = utils.env('EXCHANGE', 'octopus')
queue_name = utils.env('QUEUE', 'island_file_gen')
routing_key = utils.env('INPUT_RK', 'file_generate')

rabbit_conn = rabbitmq.get_connection()
rabbitmq.create_exchange(exchange, rabbit_conn)
rabbitmq.create_queue(queue_name, exchange, routing_key, rabbit_conn)

def file_generate_callback(ch, method, properties, body):
    logger.info("Generating file [%s]" % body)
    p_body = json.loads(body)
    map_gen_id = p_body['map_gen_id']
    file_map_id = p_body['file_map_id']

    try:
        gen_id = island.generate_file(map_gen_id, file_map_id)
        logger.info("map_gen_file_id: %s", gen_id)
    except Exception as e:
        logger.error(e)
        rabbitmq.send_to_retry("Failed", properties)

rabbitmq.consumer(queue_name, file_generate_callback, rabbit_conn)
Example #32
import os, pika, json
import utils, logging

MQ_HOST = utils.env("MQ_HOST", "localhost")
MQ_PORT = int(utils.env("MQ_PORT", 5672))
MQ_USER = utils.env("MQ_USER", "guest")
MQ_PASS = utils.env("MQ_PASS", "guest")
MQ_VHOST = utils.env("MQ_VHOST", "/")

MQ_TTL_RETRY = int(utils.env("MQ_TTL_RETRY", 30000))  # 30s
MQ_TTL_LONG_RETRY = int(utils.env("MQ_TTL_LONG_RETRY",
                                  10 * MQ_TTL_RETRY))  # 10x30s = 300s = 5min
MQ_MAX_RETRIES = int(utils.env("MQ_MAX_RETRIES", 10))  # 5min retries
MQ_MAX_LONG_RETRIES = int(utils.env("MQ_MAX_LONG_RETRIES",
                                    10))  # + 50min long retries
MQ_PRE_FETCH = int(utils.env("MQ_PRE_FETCH", 1))

credentials = pika.PlainCredentials(MQ_USER, MQ_PASS)
connection_params = pika.ConnectionParameters(MQ_HOST, MQ_PORT, MQ_VHOST,
                                              credentials)


class RetryException(Exception):
    pass


class LongRetryException(Exception):
    pass


def get_connection():
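    # Plausible completion (assumption, the snippet is cut off here): open a
    # blocking connection with the parameters assembled above.
    return pika.BlockingConnection(connection_params)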
Example #33
import logging, json
import utils, api_client, rabbitmq

exchange = utils.env('EXCHANGE', 'seeder')
queue_name = utils.env('QUEUE', 'seeder-split')
input_routing_key = utils.env('INPUT_RK', 'split')
output_routing_key = utils.env('OUTPUT_RK', 'transform')

conn = rabbitmq.get_connection()
rabbitmq.create_exchange(exchange, conn)
rabbitmq.create_queue(queue_name, exchange, input_routing_key, conn)


def callback(ch, method, properties, body):

    p_body = json.loads(body)
    id = p_body['id']
    type = p_body['type']
    message = dict(p_body)

    # get file by id
    resp = api_client.get_file(id)
    if resp.status_code != 200:
        error_msg = "Failed to get file %s. [HTTP_STATUS=%i] - %s" % (
            id, resp.status_code, resp.text)
        rabbitmq.send_to_retry(error_msg, properties)
        return  # don't fall through and process a failed response

    raw_data = resp.text
    logging.info("raw_file=[%s]" % raw_data)

    if type == 'yaml':
Example #34
    0: ("models/ob_rms/osco.pt", craft.OneStoppedCarOEnv),
    1: ("models/ob_rms/osc.pt", craft.OneStoppedCarEnv),
    2: ("models/ob_rms/2sc.pt", craft.TwoStoppedCarsEnv),
    3: ("models/ob_rms/3sc.pt", craft.ThreeStoppedCarsSSO),
}
num_episodes = 10
render = False
discrete = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
intervals = 10
all_buckets = {}
bmax = 0
plt.figure(figsize=(10, 6))

for env_id, (model_name, env) in all_envs.items():
    eval_env = env(discrete=discrete)
    eval_env.debug['action_buckets'] = True
    if not os.path.isfile(model_name):
        print("Not trained, run with -train")
        exit(0)
    policy = utils.torch.load_empty_policy(learn.PolicyPPO,
                                           "models/gym_spaces.pt",
                                           hidden=64)
    ob_rms = policy.load_model(model_name)
    utils.env.evaluate_ppo(policy,
                           ob_rms,
                           eval_env,
                           device,
                           num_episodes=num_episodes,
                           render=render,
                           discrete=discrete)
Example #35
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################

import os
import sys
from dsl import load_profile_config
from utils import sh, sh_str, env, e, setup_env, objdir, info, debug, error, pathjoin


config = load_profile_config()
arch = env('TARGET_ARCH', 'amd64')
makeconfbuild = objdir('make-build.conf')
kernconf = objdir(e('${KERNCONF}'))
kernconf_debug = objdir(e('${KERNCONF}-DEBUG'))
kernlog = objdir('logs/buildkernel')
kerndebuglog = objdir('logs/buildkernel-debug')
worldlog = objdir('logs/buildworld')
makejobs = None


def calculate_make_jobs():
    global makejobs

    jobs = sh_str('sysctl -n kern.smp.cpus')
    if not jobs:
        makejobs = 2
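    # The page truncates the snippet here; the missing else-branch presumably
    # derives the job count from the sysctl CPU count above (assumption).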