Example #1
    def populate(self):
        logger.info("Populating Current VM instance ....")
        try:
            imds_url = config.get('imds', 'imds_url')
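            # the Azure Instance Metadata Service requires the "Metadata: true" header on every request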
            response = requests.get(imds_url, headers={"Metadata":"true"})
            response_txt = json.loads(response.text)

            #populate required instance variables
            self.vmId = response_txt['vmId']
            self.name = response_txt['name']
            self.location = response_txt['location']
            self.subscriptionId = response_txt['subscriptionId']
            self.vmScaleSetName = response_txt['vmScaleSetName']
            self.resourceGroupName = response_txt['resourceGroupName']
            self.tags = response_txt['tags']

            #populate access_token
            accesstoken_url = config.get('imds', 'accesstoken_url')

            access_token_response = requests.get(accesstoken_url, headers={"Metadata":"true"})
            access_token_text = json.loads(access_token_response.text)
            self.access_token = access_token_text['access_token']

            logger.info("Returning populated VMInstance")
        except Exception as exc:
            logger.error("Error populating VM instance: %s", exc)

        return self
Example #2
def post_metrics():
    global cpu_percent
    logger.info("Posting Custom metrics.....")

    metric_post_url = config.get('monitor', 'metric_post_url')

    formatted_url = metric_post_url.format(location=vmInstance.location,
                                           subscriptionId=vmInstance.subscriptionId,
                                           resourceGroupName=vmInstance.resourceGroupName,
                                           resourceName=vmInstance.name)

    data = getMetricPostData(cpu_percent)
    logger.info("Data: " + json.dumps(data))

    headers = config.get('monitor', 'metric_headers')
    headers = json.loads(headers)
    headers['Content-Length'] = str(len(json.dumps(data)))
    headers['Authorization'] = "Bearer " + vmInstance.access_token

    #formatted_headers = headers.format(clength = len(data))
    #formatted_headers['Authorization'] = "Bearer " + vmInstance.access_token
    logger.info("headers: " + json.dumps(headers))


    requests.post(formatted_url, json=data, headers=headers)
Example #3
 def __init__(self):
     LOGIN = config.get("setting", "username")
     PASS = config.get("setting", "password")
     self.post_params = urllib.urlencode({
                             'login_username' : LOGIN,
                             'login_password' : PASS,
                             'login' : '%C2%F5%EE%E4'
                                     })
     self._connect()
     self.check_url('107803')
Example #4
 def __init__(self):
     LOGIN = config.get("setting", "username")
     PASS = config.get("setting", "password")
     self.post_params = urllib.urlencode({
         'login_username': LOGIN,
         'login_password': PASS,
         'login': '******'
     })
     self._connect()
     self.check_url('107803')
Example #5
 def __init__(self):
     self.query = ""
     self.entity_id = ""
     self.username = config.get('username')
     self.password = config.get('password')
     self.basicUrl = ""
     self.includedProperties = ['Name']
     if config.get_project_var('user_id') is None:
         self._set_user()
     if config.get_project_var('process_id') is None:
         self._set_process_id()
Example #6
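# Builds a SQLAlchemy engine from the [db] config section; the connection string has the form dialect://user:password@host/database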
def getEngine():
    db_type = config.get('db', 'Type')
    database = config.get('db', 'Database')
    user = config.get('db', 'User')
    password = config.get('db', 'Password')
    host = config.get('db', 'Host')
    echo = config.getboolean('db', 'Echo')

    connectionString = db_type + '://' + user + ':' + password + '@' + host + '/' + database

    return create_engine(connectionString, echo=echo)
Example #7
def isInstanceinPendingDelete():
    deleteTag = config.get('imds', 'pending_delete_tag')

    if deleteTag in vmInstance.tags:
        return True
    else:
        return False
Example #8
    def isPendingDelete(self):
        deleteTag = config.get('imds', 'pending_delete_tag')

        if deleteTag in self.tags:
            return True
        else:
            return False
Example #9
def fail_with_error(log, error):

    log.error(error)

    send_email(config.get('EMAIL', 'ERRORS_RECIPIENT'),
               'LVC PIPELINE FAILURE: ', error)

    sys.exit(-1)
Example #10
def deleteVMFromVMSS():
    logger.info("Deleting the VM from VMSS")

    vm_delete_url =  config.get('vmss', 'vm_delete_url')
    formatted_url = vm_delete_url.format(subscriptionId=vmInstance.subscriptionId,
                                         resourceGroupName=vmInstance.resourceGroupName,
                                         vmScaleSetName=vmInstance.vmScaleSetName,
                                         instanceId=vmInstance.vmId)

    logger.info("The Delete URL is - " +  formatted_url)

    requests.delete(formatted_url, data={}, auth=BearerAuth(vmInstance.access_token))
Example #11
def download_fundamental_data(fundatmental_url,
                              fundamental_file_name,
                              output_adapter_functor=None):
    list_stocks = []
    for stock in stock_list:
        print("Getting info [%s] of [%s]" % (fundatmental_url, stock))
        response = requests.get(fundatmental_url % stock, verify=False)
        if output_adapter_functor:
            try:
                list_stocks.extend(output_adapter_functor(response.json()))
            except NotReturnData as nrd:
                print("Err: %s %s %s" % (stock, str(nrd), response.text))
        else:
            list_stocks.append(response.json())
        time.sleep(default_configuration.get("Download_Delay"))
    dataframe = pd.DataFrame(list_stocks)
    file_name = fundamental_file_name % (day_str)
    dataframe.to_csv(
        os.path.join(default_configuration.get("Download_folder"), file_name))
    print("Export info [%s]" % file_name)
Example #12
def failLoadBalancerProbes():
    logger.info("Failing Health Probes")
    try:
        kill_health_probe = config.get('shell-commands', 'kill_health_probe_process')
        # Kill the health-probe process so the load balancer marks this instance as unhealthy
        kill_process = os.system(kill_health_probe)

        if kill_process != 0:
            logger.error("Error killing health probe")
    except Exception:
        logger.error("Error failing the health probe")
Example #13
def stopCustomMetricFlow():
    logger.info("Stopping the Custom Metrics")
    removeCrontab = config.get('shell-commands', 'remove_all_crontab')

    #removeCrontab = "crontab -r"
    
    logger.info("Deleting all cron jobs")
   
    # Delete all cron jobs
    areCronsRemoved = os.system(removeCrontab)

    if areCronsRemoved != 0:
        logger.error("Error deleting Cron jobs, health probe will not fail")
Example #14
    def _fix_isotropic_diffuse_path(self, tree):

        # Find Isotropic template in this system
        # If we are at slac, force the use of the same templates that have been simulated
        slac_simulated_templates_path = config.get("SLAC", "SIM_DIFFUSE_PATH")

        if os.path.exists(slac_simulated_templates_path):

            log.info("Forcing the use of templates in directory %s" %
                     slac_simulated_templates_path)

            os.environ['GTBURST_TEMPLATE_PATH'] = slac_simulated_templates_path

        templ = findTemplate(IRFS.IRFS[self._irfs].isotropicTemplate)

        # Now update the XML tree with the new location for the template

        # NOTE: this assumes that the tree has been generated by gtburst

        src, = tree.findall("source[@name='IsotropicTemplate']")

        src.findall("spectrum")[0].set("file", templ)
Example #15
 def __init__(self):
     from configuration import config
     config = config.get()
     self.spark = None
     self.i94path = config.get("File", "i94Path")
     self.demoPath = config.get("File", "demoPath")
     self.airportPath = config.get("File", "airportPath")
     self.tempreturePath = config.get("File", "tempreturePath")
     self.paqutRoot = config.get("File", "paqutRoot")
Example #16
def deleteVMFromVMSS():
    logger.info("Deleting the VM from VMSS, Id: " + vmInstance.vmId)

    vm_delete_url = config.get('vmss', 'vm_delete_url')
    formatted_url = vm_delete_url.format(subscriptionId=vmInstance.subscriptionId,
                                         resourceGroupName=vmInstance.resourceGroupName,
                                         vmScaleSetName=vmInstance.vmScaleSetName)

    logger.info("The Delete URL is - " + formatted_url)

    data = {"instanceIds": [vmInstance.vmId]}

    headers = {}
    headers['Authorization'] = "Bearer " + vmInstance.access_token
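    # the scale-set deleteInstances call expects a JSON body, so pass the dict via json= (data= would form-encode it)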

    requests.post(formatted_url, json=data, headers=headers)
Example #17
    if not args.triggername.startswith("bn"):

        # Need to add "bn" at the beginning

        triggername = "bn%s" % args.triggername

    else:

        # "bn" is already present

        triggername = args.triggername

    # First we need to figure out the version, i.e., if a task for this GW event has been already run
    # This will be something like /nfs/farm/g/glast/u26/GWPIPELINE/input/bnGW150914

    this_trigger_input_dir = os.path.join(config.get("SLAC", "INPUT_DIR"),
                                          triggername)

    # Find existing versions
    existing_versions = glob.glob(os.path.join(this_trigger_input_dir, 'v*'))

    if len(existing_versions) == 0:

        # There are no existing versions
        version = 'v00'

    else:

        # Remove absolute path
        existing_versions = list(map(os.path.basename, existing_versions))
Example #18
def getCurrentTicket():
    return config.get('hostname') + "/entity/" + getCurrentTicketNumber()
Example #19
#
# You should have received a copy of the GNU Lesser General Public License
# along with ffmonitor.  If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from configuration import config
import logging
import rrdtool
from NameDB import NameDB
import os


logger = logging.getLogger('monitor')
namedb = NameDB()

daemon = config.get('rrd', 'daemon')
graph_path = config.get('graph', 'path')

# define which graphs to generate
gen_graphs = {'hour': '60min', 'day': '24h', 'week': '168h', 'month': '744h',
              'year': '8760h' }

# rrdtool specific stuff
# for node graphs
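# DEF syntax is DEF:<vname>=<rrdfile>:<datasource>:<consolidation>; the {} placeholder is filled with the rrd file name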
node_defs = ['DEF:gclient={}:clients:AVERAGE',
             'DEF:gwifi={}:wifilinks:AVERAGE',
             'DEF:gvpn={}:vpns:AVERAGE']
node_lines = []
node_lines.append('COMMENT:          ')
node_lines.append('COMMENT:         Cur')
node_lines.append('COMMENT:       Max')
Example #20
    for x in hexatrig[::-1]:
        if x.isdigit():
            vmid += int(x) * multiplier
        else:
            # convert letter to corresponding integer
            vmid += (ord(x) - 55) * multiplier
        multiplier *= 36
    return vmid

# Check the value of Platform.PendingDeletionTime tag of IMDS
if not isPendingDelete:
    logger.info('exit : ' + str(isPendingDelete))
    sys.exit(1)

# Get App GW Backend status check URL and App GW name
appGatewayUrl = config.get('appgw', 'appgw_behealth_url')
appGateway = config.get('appgw', 'appgw_name')

formatted_url = appGatewayUrl.format(subscriptionId=metadata.subscriptionId,
                                     resourceGroupName=metadata.resourceGroupName,
                                     appGatewayName=appGateway)

# Getting App GW backend health URI
try:
    r = requests.post(formatted_url, headers = {}, auth=BearerAuth(metadata.access_token))
except requests.exceptions.RequestException as e:
    logger.info("error : " + str(e))
    sys.exit(1)

# Waiting for another api to check the result.
time.sleep(timeSleep)
Example #21
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Patrick Uiterwijk nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from configuration import config
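# Note: bool() on a non-empty string such as "False" is still True; a getboolean-style read would be safer for the echo flag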

engine = create_engine(config.get('database', 'URI'), echo=bool(config.get('database', 'echo')))
session = sessionmaker(bind=engine)
Example #22
    config.read_file(f)

# constants
use_gpu = config.getboolean("training",
                            "use_gpu") and torch.cuda.is_available()


# paths
def mkdir(path):
    try:
        os.makedirs(path)
    except OSError:
        pass


output_path = config.get("testing", "output_path")
mkdir(output_path)

# models
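# the encoder input size is patch_size * patch_size * 3, i.e. one flattened RGB patch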
encoder = NeuralNet(config.getint("dataset", "patch_size") *
                    config.getint("dataset", "patch_size") * 3,
                    100,
                    10,
                    activation=nn.Tanh)

# load the state
state_path = config.get("testing", "state_path")
encoder.load_state_dict(torch.load(state_path))

# move to gpu if needed
if use_gpu:
Example #23
import requests
import pandas as pd
import time
import os
import datetime

import configuration.config as default_configuration

stock_list = default_configuration.get("Stocks")

now = datetime.datetime.now()
day_str = now.strftime("%Y%m%d")


class NotReturnData(Exception):
    pass


def download_fundamental_data(fundatmental_url,
                              fundamental_file_name,
                              output_adapter_functor=None):
    list_stocks = []
    for stock in stock_list:
        print("Getting info [%s] of [%s]" % (fundatmental_url, stock))
        response = requests.get(fundatmental_url % stock, verify=False)
        if output_adapter_functor:
            try:
                list_stocks.extend(output_adapter_functor(response.json()))
            except NotReturnData as nrd:
                print("Err: %s %s %s" % (stock, str(nrd), response.text))
        else:
Example #24
from configuration import config
from operator import itemgetter
import logging
import rrdtool
import time
import os


logger = logging.getLogger('monitor')

# rrd needs some special treatment regarding the series start and end,
# otherwise the number of returned data points would be wrong
resolution = 60
series_end = str(int(time.time() / resolution) * resolution)

os.chdir(config.get('rrd', 'path')) # avoid concatenating path and file name for fs operations
daemon = config.get('rrd', 'daemon')


def count_data(rrd_file):
    """Count the total number of data points between now and series_end.

    In addition, the number of unknown data points is returned.
    """

    data = rrdtool.fetch(rrd_file,
                        'AVERAGE',
                        '--daemon', daemon,
                        '--resolution', str(resolution),
                        '--end', series_end,
                        '--start', 'end-86400s')
    unknown = 0
Example #25
def submit_job(trigger_name,
               trigger_time,
               desired_tstart_met,
               desired_tstop_met,
               map_path,
               simulate=False):

    # Only submit the job if there is at least 1 ks of data after the trigger time

    if get_maximum_available_MET() <= desired_tstart_met + 1000.0:

        # No available data
        return -2

    else:

        # Make sure map exists
        if not os.path.exists(map_path):

            fail_with_error(log,
                            "submit_job: map %s does not exists!" % map_path)

        # Move it to SLAC

        slac_path = "%s/%s_%s" % (config.get(
            "SLAC", "MAP_DIR"), trigger_name, os.path.basename(map_path))

        cmd_line = 'rsync %s %s:%s' % (map_path, config.get(
            "SLAC", "SSH_HOST"), slac_path)

        execute_command(log, cmd_line)

        # Now fix the permission at SLAC
        log.info("Fixing permission at SLAC...")
        cmd_line = 'ssh %s chmod ga+rw %s' % (config.get(
            "SLAC", "SSH_HOST"), slac_path)
        execute_command(log, cmd_line)

        # Now submit the job at SLAC using ssh

        cmd_line = 'ssh %s' % config.get("SLAC", "SSH_HOST")

        command = os.path.join(config.get("SLAC", "FERMI_GW_TOOLKIT_PATH"),
                               'fermi_gw_toolkit', 'automatic_pipeline',
                               'p2_task_wrapper.py')

        cmd_line += ' python %s' % command

        cmd_line += ' --triggername %s' % trigger_name

        cmd_line += ' --triggertime %s' % trigger_time

        cmd_line += ' --tstart_met %s' % desired_tstart_met
        cmd_line += ' --tstop_met %s' % desired_tstop_met

        cmd_line += ' --map %s' % slac_path

        if simulate:

            cmd_line += " --simulate"

        try:

            execute_command(log, cmd_line)

        except Exception:

            fail_with_error(
                log, "Could not execute %s. Traceback: \n\n %s" %
                (cmd_line, traceback.format_exc()))

        return 0
Example #26
 def __init__(self):
     from configuration import config
     config = config.get()
     self.metaFile = config.get("File", "MetaData")
Example #27
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Patrick Uiterwijk nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from configuration import config

engine = create_engine(config.get('database', 'URI'),
                       echo=bool(config.get('database', 'echo')))
session = sessionmaker(bind=engine)
Example #28
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Patrick Uiterwijk nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from pika import BlockingConnection, URLParameters, PlainCredentials

from configuration import config
from queue_handler import install_queues

connection = BlockingConnection(URLParameters(config.get('broker', 'URI'))) 

channel = connection.channel()
channel.basic_qos(prefetch_count=1)

# Register queues and callbacks
install_queues(channel)
Example #29
from torch import optim
from torch.utils.data import DataLoader
from torchvision import utils as vutils

from configparser import ConfigParser

config = ConfigParser()
with open("stanosa.conf", "r") as f:
    config.read_file(f)

# constants
use_gpu = config.getboolean("training",
                            "use_gpu") and torch.cuda.is_available()

# paths
output_path = config.get("training", "output_path")
state_path = os.path.join(output_path, "states")
for path in [output_path, state_path]:
    try:
        os.makedirs(path)
    except OSError:
        pass

# models
encoder = NeuralNet(config.getint("dataset", "patch_size") *
                    config.getint("dataset", "patch_size") * 3,
                    100,
                    10,
                    activation=nn.Tanh)

decoder = NeuralNet(10,
Example #30
def send_notice(subject, text):

    send_email(config.get('EMAIL', 'ERRORS_RECIPIENT'), subject, text)
Example #31
The logger module uses camelCase naming, the style of statically typed languages; it is unclear why that was
chosen, but to keep the variable names consistent this module follows the same camelCase convention.

Logs will eventually be stored in a log server where they can all be viewed as required, which is why
the name of the microservice is important.
"""
import os
import logging

#: Use the application config to control logging settings
from configuration import config
from configuration import APP_CONFIG, CURRENT_CONFIG

#: Get the environmental configuration or currently configured config
currentConfig = config.get(os.environ.get(APP_CONFIG) or CURRENT_CONFIG)

#: Necessary to differentiate these logs from logs from other micro services
#: especially when used by the service monitor to trace request path
applicationName = currentConfig.APP_NAME
logLevel = currentConfig.APP_LOG_LEVEL

logger = logging.getLogger(applicationName)
logger.setLevel(logLevel)

#: This can be redirected elsewhere for log recording
loggerHandler = logging.FileHandler(filename='application.log')
loggerHandler.setLevel(logLevel)

loggerFormatter = logging.Formatter(
    '%(asctime)s -:::- %(name)s -:::- %(levelname)s -:::- %(message)s ')
Example #32
 def __init__(self):
     self.hostname = config.get('hostname')
Example #33
import asyncio
import logging
import configuration.config as config

from urllib.parse import urlparse
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_polling
from service.user_service import UserService
from aiogram.types import ParseMode
from service.raider_io_api_service import RaiderIoService
from aiogram.utils.markdown import *
from service.database_service import DatabaseService


API_TOKEN = config.get('api-token')

logging.basicConfig(level=logging.INFO)

loop = asyncio.get_event_loop()
bot = Bot(token=API_TOKEN, loop=loop)
dp = Dispatcher(bot)
db_service = DatabaseService()
userService = UserService(db_service)
raiderIoService = RaiderIoService(db_service)


@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    logging.info('send_welcome')
    await message.reply("Hi!\nI'm Raider IO bot!.")
Example #34
    def run_simulation(self, outfile='gwt_sim', seed=None, point_source=None):
        """

        :param outfile:
        :param seed:
        :param point_source: a tuple (name, ra, dec, index, energy_flux), where the energy flux is in erg/cm2/s between
        100 MeV and 100 GeV
        :return:
        """

        if point_source is not None:

            # Need to add a point source

            pts_source_name, ra, dec, index, energy_flux = point_source

            emin = (self._emin * u.MeV).to(u.erg).value
            emax = (self._emax * u.MeV).to(u.erg).value
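            # conv is the mean photon energy <E> of a power law dN/dE ~ E^index between emin and emax,
            # i.e. integral(E * E^index dE) / integral(E^index dE), so photon flux = energy flux / <E>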

            if index != -2.0:

                conv = (1.0 + index) / (2.0 + index) * (
                    pow(emax, index + 2) - pow(emin, index + 2)) / (
                        pow(emax, index + 1) - pow(emin, index + 1))

            else:

                conv = (emin) * (emax) / (emax - emin) * np.log(emax / emin)

            photon_flux = energy_flux / conv * (1 / u.cm**2 / u.s)

            photon_flux_gtobsim = photon_flux.to(1 / u.m**2 / u.s).value

            log.info("Photon flux for gtobssim: %.2g m^(-2) s^(-1)" %
                     (photon_flux_gtobsim))
            log.info("Conversion factor: %s" % conv)

            # Generate the point source XML
            temp_pts_xml = "my_point_source.xml"

            with open(temp_pts_xml, "w+") as f:

                src_def = '''
                            <source_library title="PointSource">
                            <source name="%s" flux="%s">
                                <spectrum escale="MeV">
                                    <particle name="gamma">
                                        <power_law emin="%s" emax="%s" gamma="%s"/>
                                    </particle>
                                    <celestial_dir ra="%s" dec="%s"/>
                                </spectrum>
                            </source>
                            </source_library>
                          ''' % (pts_source_name, photon_flux_gtobsim,
                                 self._emin, self._emax, float(index) *
                                 (-1), ra, dec)

                f.write(src_def)

            # Now generate a txt file containing the list of XML to use
            xml_list = "xml_list.txt"

            with open(xml_list, "w+") as f:

                with open(sanitize_filename(config.get("SLAC",
                                                       "SIM_XML"))) as ff:

                    lines = ff.readlines()

                f.writelines(lines)

                f.write("\n%s\n" % sanitize_filename(temp_pts_xml))

            # Add the new point source to the list of sources to simulate
            src_list = "srclist.txt"

            with open(src_list, "w+") as f:

                with open(sanitize_filename(config.get("SLAC",
                                                       "SIM_SRC_LIST"))) as ff:

                    lines = ff.readlines()

                f.writelines(lines)

                f.write("\n%s\n" % pts_source_name)

        else:

            xml_list = sanitize_filename(config.get("SLAC", "SIM_XML"))
            src_list = sanitize_filename(config.get("SLAC", "SIM_SRC_LIST"))

        # We need to setup the environment variable SKYMODEL_DIR before running gtobssim
        os.environ['SKYMODEL_DIR'] = config.get("SLAC", "SKYMODEL_DIR")

        # Gather arguments for gtobssim in a dictionary

        evroot = '__gw_sims'

        _gtobssim_args = {
            'emin':
            self._emin,
            'emax':
            self._emax,
            'edisp':
            'no',
            'infile':
            xml_list,
            'srclist':
            src_list,
            'scfile':
            self._ft2,
            'evroot':
            evroot,
            'simtime':
            self._simulation_time,
            'ltfrac':
            self._ltfrac,
            'tstart':
            self._tstart,
            'use_ac':
            'no',
            'irfs':
            self._irfs,
            'evtype':
            'none',
            'seed':
            seed if seed is not None else np.random.randint(
                int(1e4), int(1e9))
        }

        gtobssim_app = GtApp('gtobssim')

        log.info("About to start simulation")
        log.info("#### gtobsim output start #####")
        print("\n\n")

        gtobssim_app.run(**_gtobssim_args)

        print("\n\n")
        log.info("#### gtobsim output stop #####")

        # Now find out the FT1 file produced by the simulation
        event_files = glob.glob("%s_events_*.fits" % evroot)

        assert len(event_files
                   ) > 0, "Simulation failed, there are no ft1 files produced."

        # Track them as temp files so we'll clean them up at the end
        event_files = list(map(self._track_temp_file, event_files))

        # Merge the event files using gtselect

        # Make a text file with the list of ft1 files
        ft1_file_list = self._track_temp_file("__gw_sim_ft1_list.txt")

        with open(ft1_file_list, "w+") as f:

            for ft1 in event_files:

                f.write("%s\n" % ft1)

        gtselect_app = GtApp('gtselect')

        log.info("Merging simulated event files")
        log.info("#### gtselect output start #####")
        print("\n\n")

        gtselect_app.run(infile=ft1_file_list,
                         outfile=outfile,
                         ra=0.0,
                         dec=0.0,
                         rad=180.0,
                         tmin=self._tstart,
                         tmax=self._tstart + self._simulation_time,
                         emin=self._emin,
                         emax=self._emax,
                         zmax=180.0,
                         evclass=IRFS.IRFS[self._irfs].evclass,
                         evtype="INDEF",
                         convtype='-1')

        print("\n\n")
        log.info("#### gtselect output stop #####")

        # Now check how many events we had before gtselect
        n_simulated_events = 0

        for ft1 in event_files:

            with pyfits.open(ft1) as f:

                n_simulated_events += len(f['EVENTS'].data)

        # Now get the number of events which survived the cut
        n_simulated_events_after_cuts = 0

        with pyfits.open(outfile, mode='update') as f:

            n_simulated_events_after_cuts += len(f['EVENTS'].data)

            # Need to fix this because gtobssim writes "1", which is not an acceptable reprocessing
            # version for gtburst

            f[0].header['PROC_VER'] = 302

        assert n_simulated_events == n_simulated_events_after_cuts, "Some events were lost when cutting with gtselect!"

        log.info("Generated %s events of class %s" %
                 (n_simulated_events_after_cuts, self._irfs))

        self._cleanup()

        # Store for future use with its absolute path
        self._simulated_ft1 = sanitize_filename(outfile)
Example #35
from configuration import config

DEBUG = config.get('DEBUG', False)
if DEBUG:
    import os
    import shutil
    try:
        shutil.rmtree('out/debug')
    except OSError:
        pass
    try:
        os.mkdir('out/debug')
    except OSError:
        pass

debug_counter = 0


def deb(im, name):
    global debug_counter
    if DEBUG:
        print(name)
        im.save('out/debug/' + str(debug_counter) + ' ' + name + '.png')
        debug_counter += 1


def random_color():
    from random import randint
    return (randint(0, 255), randint(0, 255), randint(0, 255))
Example #36
def getTicketNumber(branch):
    match = re.match(config.get('branch_regex'), branch)
    if match is not None:
        return match.group(1)
    raise NoSuitableBranchFound(branch)
Example #37
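        # each RRA spec is "RRA:<CF>:<xff>:<steps>:<rows>"; {} is filled below with the consolidation function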
        rra_strs = ['RRA:{}:0:1:60', 
                    'RRA:{}:0:1:1440',
                    'RRA:{}:0.5:15:672',
                    'RRA:{}:0.5:60:744',
                    'RRA:{}:0.5:60:8760']

        rra = [rra_str.format(rra_type) for rra_type in rra_types
               for rra_str in rra_strs]
        
        rrdtool.create(filename,
                       '--step', '60',
                        data_src,
                        rra)


daemon = config.get('rrd', 'daemon')
rrd_path = config.get('rrd', 'path')


# save data to rrd files
def _update(data_src, clean_id, *args):
    filename = ''.join([rrd_path, clean_id, '.rrd'])
    filename = filename.encode('ascii', 'ignore')
    
    # ensure rrd file existence
    _create_rrd(filename, data_src)

    # concatenate date and values
    update_str = ':'.join([str(value) for value in args])
    rrdtool.update(filename,
                   '--daemon', daemon,
Example #38
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Patrick Uiterwijk nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from pika import BlockingConnection, URLParameters, PlainCredentials

from configuration import config
from queue_handler import install_queues

connection = BlockingConnection(URLParameters(config.get('broker', 'URI')))

channel = connection.channel()
channel.basic_qos(prefetch_count=1)

# Register queues and callbacks
install_queues(channel)
Example #39
                           maps)

        assert len(
            this_maps
        ) > 0, "No maps for trigger %s (this should be impossible)" % trigger

        # Find the most recent map for this trigger
        most_recent_map = max(this_maps, key=os.path.getctime)

        log.info("Most recent map found is %s" % most_recent_map)

        # Submit job for this map
        cmd_line = "python "

        cmd_line += os.path.join(
            config.get("Stanford",
                       "FERMI_GW_TOOLKIT_PATH"), 'fermi_gw_toolkit',
            'automatic_pipeline', 'submit_pipeline2_task.py')

        cmd_line += " --tstop 10000"

        cmd_line += " --map %s" % most_recent_map

        if config.getboolean("Stanford", "SIMULATE"):

            cmd_line += " --simulate"

        try:

            execute_command(log, cmd_line)

        except subprocess.CalledProcessError as grepexc:
Example #40
def get_ff_data():
    """requests new json data from a ff server

    if data was not modified, an exponential back off is used
    to try again 

    """
    file_modified_info = config.get('data', 'lastmodified')

    urls = config.items('dataurl')

    get_ff_data.alt_url += 1
    get_ff_data.alt_url = get_ff_data.alt_url % len(urls)

    modified_info = {}
    headers = {}
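    # carry If-Modified-Since / If-None-Match from the last run so the server can answer 304 Not Modified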

    try:
        with open(file_modified_info, 'rb') as fp:
            modified_info = pickle.load(fp)
    
    except IOError:
        logger.warning('could not open last modified info file: %s',
                       file_modified_info)
    
    except Exception:
        logger.error('could not read last modified info from file: %s',
                     file_modified_info)
    
    else:
        if modified_info['date'] is not None:
            headers['If-Modified-Since'] = modified_info['date']
        if modified_info['etag'] is not None:
            headers['If-None-Match'] = modified_info['etag']
    
    r = requests.get(urls[get_ff_data.alt_url][1], headers=headers, timeout=6)
    r.raise_for_status()
    
    if r.status_code == 304:
        get_ff_data.alt_url -= 1
        raise Exception('node list not modified')
    
    elif r.status_code == 200:
        # save modified info for future executions
        if r.headers.get('Last-Modified') is not None:
            modified_info['date'] = r.headers['Last-Modified']
    
        else:
            # save current time if no header was present
            now = datetime.now()
            stamp = mktime(now.timetuple())
            modified_info['date'] = format_date_time(stamp)

        if r.headers.get('ETag') is not None:
            modified_info['etag'] = r.headers['ETag']
    
        else:
            modified_info['etag'] = None

        try:
            with open(file_modified_info, 'wb') as fp:
                pickle.dump(modified_info, fp)
    
        except Exception:
            logger.error('could not write last modified info to file: %s', file_modified_info)

        data_date = str(int(email.utils.mktime_tz(email.utils.parsedate_tz(modified_info['date']))))
        return r.json(), data_date
    
    else:
        raise Exception('unexpected status code')