Exemplo n.º 1
0
def main():
    """Entry point: parse CLI arguments, then either launch the GUI or run
    the headless instrument -> compile/upload -> live-profile pipeline."""
    filepath, port, graphic = evaluate_args()

    if graphic:
        initGUI(filepath, port)
        return

    # Work on a local copy when the source lives in another directory.
    copied = '/' in filepath
    if copied:
        shutil.copy(filepath, './')
        filename = filepath.split('/')[-1]
    else:
        filename = filepath

    # Name of the instrumented source, e.g. "file.inst" for "file.c".
    source = filename.split('.')[0] + '.inst'
    # Instrument the original source code.
    functions = instrument.instrument(filename)
    # Compile the instrumented source and upload it to the Arduino.
    make.run(source, port)

    # Remove the temporary local copy, if one was made.
    if copied:
        os.remove(filename)

    # Start live-profiling of the instrumented code running on the Arduino.
    monitor.monitor(functions, port)
Exemplo n.º 2
0
def main(args):
  
  # CHECKS
  args.batchmode = not checkGUIMode(args.batchmode)
  
  # CONNECT
  print "Connecting to climate chamber..."
  chamber = connectClimateChamber()
  ymeteo1 = connectYoctoMeteo(YOCTO.ymeteo1)
  ymeteo2 = connectYoctoMeteo(YOCTO.ymeteo2)
  
  # RUN & MONITOR
  target   = args.target
  gradient = args.gradient
  airon    = not args.noair
  dryeron  = not args.nodryer
  startManualRun(chamber,target=target,gradient=gradient,air=airon,dryer=dryeron)
  monitor(chamber,ymeteo1,ymeteo2,batch=args.batchmode,out=args.output,
                 nsamples=args.nsamples,tstep=args.stepsize,twidth=args.twidth)
  stopManualRun(chamber)
  
  # DISCONNECT
  print "Closing connection..."
  chamber.disconnect()
  disconnectYoctoMeteo()
Exemplo n.º 3
0
def main(args):
    """Run a pre-defined chamber program and monitor it until it finishes.

    Connects to the climate chamber and both YoctoMeteo sensors, starts
    program ``args.prgmid`` for ``args.nruns`` runs, monitors it, then
    stops the program and closes all connections.
    """

    # CHECKS
    # NOTE(review): presumably checkGUIMode reports whether a GUI can be
    # used, so batchmode is its inverse — confirm against its definition.
    args.batchmode = not checkGUIMode(args.batchmode)

    # CONNECT
    print "Connecting to climate chamber..."
    chamber = connectClimateChamber()
    ymeteo1 = connectYoctoMeteo(YOCTO.ymeteo1)
    ymeteo2 = connectYoctoMeteo(YOCTO.ymeteo2)

    # RUN & MONITOR
    prgmid = args.prgmid
    nruns = args.nruns
    startProgram(chamber, prgmid, nruns)
    monitor(chamber,
            ymeteo1,
            ymeteo2,
            batch=args.batchmode,
            out=args.output,
            nsamples=args.nsamples,
            tstep=args.stepsize,
            twidth=args.twidth)
    stopProgram(chamber)

    # DISCONNECT
    print "Closing connection..."
    chamber.disconnect()
    disconnectYoctoMeteo()
Exemplo n.º 4
0
def Adadelta(
        network, training_data, epochs, mini_batch_size, eta,
        epsilon=0.00000001, lmbda=0.0, fraction = 0.9,
        evaluation_data=None,
        monitor_evaluation_cost=False,
        monitor_evaluation_accuracy=False,
        monitor_training_cost=False,
        monitor_training_accuracy=False):
    n = len(training_data)
    RMS_b = [(np.zeros(b.shape), np.zeros(b.shape)) for b in network.biases]
    RMS_w = [(np.zeros(w.shape), np.zeros(w.shape)) for w in network.weights]

    evaluation_cost, evaluation_accuracy = [], []
    training_cost, training_accuracy = [], []

    for j in xrange(epochs):
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k + mini_batch_size]
            for k in xrange(0, n, mini_batch_size)
            ]

        print "epochs[%d]" % j

        for mini_batch in mini_batches:
            RMS_b, RMS_w = update_mini_batch(
                network, mini_batch, eta, lmbda, n,
                epsilon, fraction, RMS_b, RMS_w
            )

        monitor(network, training_data, evaluation_data,
                training_cost, training_accuracy, evaluation_cost, evaluation_accuracy,
                lmbda,
                monitor_evaluation_cost, monitor_evaluation_accuracy,
                monitor_training_cost, monitor_training_accuracy)
Exemplo n.º 5
0
def SGD(network, training_data, epochs, mini_batch_size, eta,
        lmbda = 0.0,
        evaluation_data=None,
        monitor_evaluation_cost=False,
        monitor_evaluation_accuracy=False,
        monitor_training_cost=False,
        monitor_training_accuracy=False):
    """Train ``network`` by mini-batch stochastic gradient descent.

    Shuffles the training data each epoch, applies ``update_mini_batch``
    per batch with learning rate ``eta`` and L2 coefficient ``lmbda``,
    then calls ``monitor`` to record the statistics selected by the
    ``monitor_*`` flags.

    Returns (training_cost, training_accuracy, evaluation_cost,
    evaluation_accuracy).
    """

    n = len(training_data)

    # Per-epoch statistics, presumably appended to by monitor() — confirm.
    evaluation_cost, evaluation_accuracy = [], []
    training_cost, training_accuracy = [], []

    for j in xrange(epochs):
        # Fresh random batching each epoch.
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k+mini_batch_size]
            for k in xrange(0, n, mini_batch_size)
        ]

        print "epochs[%d]" % j

        for mini_batch in mini_batches:
            update_mini_batch(network, mini_batch, eta, lmbda, n)

        monitor(network, training_data, evaluation_data,
                training_cost,training_accuracy,evaluation_cost,evaluation_accuracy,
                lmbda,
                monitor_evaluation_cost, monitor_evaluation_accuracy,
                monitor_training_cost, monitor_training_accuracy)

    return training_cost, training_accuracy, evaluation_cost, evaluation_accuracy
Exemplo n.º 6
0
def monitor_job():
    """One scheduled monitoring pass with DingTalk alerting.

    Runs monitor('miner_list.txt') and, depending on the outcome, posts
    to the DingTalk webhook.  Module globals carry state between runs so
    repeated alerts are suppressed:
      last_status      -- 0 = all miners OK, 1 = mining error
      last_miners      -- failing-miner set from the previous error
      bad_request_count / bad_request_bar -- exponential-backoff counter
                          for ConfluxScan API request failures
    """
    global bad_request_count
    global bad_request_bar
    global last_status
    global last_miners

    try:
        monitor('miner_list.txt')
        # Success: reset the bad-request backoff.
        bad_request_count = 0
        bad_request_bar = INITIAL_BAD_REQUEST_BAR
        # Announce recovery only on a transition from a non-OK state.
        if last_status != 0:
            requests.post(DINGTALK_URL.format(dingtalk_token),
                          json=make_payload("all miners are back to work now", at_all=True))
        last_status = 0

    except MiningError as err:
        bad_request_count = 0
        bad_request_bar = INITIAL_BAD_REQUEST_BAR
        # Precedence note: `and` binds tighter than `or` — alert when we
        # were previously OK, or when the set of failing miners changed.
        if last_status == 0 or last_status == 1 and last_miners != err.miners:
            requests.post(DINGTALK_URL.format(dingtalk_token), json=make_payload(err.message, at_all=True))
        last_miners = err.miners
        last_status = 1

    except RequestError:
        # API trouble: alert only each time the doubling threshold is hit.
        bad_request_count += 1
        if bad_request_count >= bad_request_bar:
            requests.post(DINGTALK_URL.format(dingtalk_token),
                          json=make_payload("bad request on ConfluxScan API for {} times.".format(bad_request_count)))
            bad_request_bar *= 2

    except Exception as e:
        # Unexpected failure: notify, then re-raise so the scheduler sees it.
        print('Unexpected error: ', e)
        requests.post(DINGTALK_URL.format(dingtalk_token), json=make_payload("Unexpected error: {}.".format(str(e))))
        raise
Exemplo n.º 7
0
def main(args):
    """Stop the chamber (or force a warm-up) and monitor the sensors.

    Connects to the climate chamber and both YoctoMeteo sensors; with
    ``args.warmup`` set, forces a warm-up toward ``args.target`` at
    ``args.gradient``, otherwise stops the chamber; then monitors and
    finally disconnects everything.
    """

    # CHECKS
    # NOTE(review): presumably checkGUIMode reports whether a GUI can be
    # used — confirm against its definition.
    args.monitor = checkGUIMode(not args.monitor)

    # CONNECT
    print "Connecting to climate chamber..."
    chamber = connectClimateChamber()
    ymeteo1 = connectYoctoMeteo(YOCTO.ymeteo1)
    ymeteo2 = connectYoctoMeteo(YOCTO.ymeteo2)

    # STOP & MONITOR
    if args.warmup:
        chamber.forceWarmUp(args.target, args.gradient)
    else:
        chamber.stop()
    monitor(chamber,
            ymeteo1,
            ymeteo2,
            batch=args.monitor,
            out=args.output,
            nsamples=args.nsamples,
            tstep=args.stepsize,
            twidth=args.twidth)

    # DISCONNECT
    print "Closing connection..."
    chamber.disconnect()
    disconnectYoctoMeteo()
Exemplo n.º 8
0
def monitorear():
    """Start the monitoring routine, logging its start and end.

    Shows a warning dialog first (its text says ESC exits), then runs
    mon.monitor() — presumably blocking until monitoring stops.
    """
    # Log that monitoring is starting.
    system_log("inicia monitoreo")
    messagebox.showwarning('Inicia monitoreo','Iniciando monitoreo\n Presionar "ESC" para salir.')
    # Run the monitoring routine.
    mon.monitor()
    # Log that monitoring has finished.
    system_log("finaliza monitoreo")
Exemplo n.º 9
0
def test_monitor(capsys):
    """pytest: monitor() should print a value line then an 'Updating' line.

    Relies on module-level ``watch_path`` and ``filename`` (bytes) and on
    the watched value being 42 — presumably arranged by fixtures elsewhere
    in this test module; confirm there.
    """
    monitor(watch_path.decode(), check_ptrn='')
    out, err = capsys.readouterr()
    # Expected: "<path>/<file> 42\n" followed by "Updating <path>/<file>: 42\n".
    desired_output = '{0}/{1} {2}\n'.format(watch_path.decode(),
                                            filename.decode(), 42)
    desired_output += 'Updating {0}/{1}: {2}\n'.format(watch_path.decode(),
                                                       filename.decode(), 42)
    assert out == desired_output
Exemplo n.º 10
0
def thread_monitor_and_config(event_object):
    """Worker loop: configure, manage the network, and monitor, forever.

    Each pass runs under the shared ``lock`` so other threads are held
    off, then waits on ``event_object`` for the configured monitoring
    interval (waking early if the event is set).
    """
    while True:
        with lock:
            logger.debug("Configurator is working...")
            configure()
            logger.debug("Network manager is working...")
            manage_network()
            logger.debug("Monitor is working...")
            monitor()
        event_object.wait(conf.get_send_monitoring_data_interval_config())
Exemplo n.º 11
0
def thread_monitor_and_config():
    """Background loop: periodically run the monitor under the shared lock.

    Holds ``lock`` while monitor() runs so other threads do not interfere,
    then sleeps for INTERVAL_SEND_MONITOR seconds before the next pass.
    """
    while True:
        with lock:
            # Blank line keeps successive monitor outputs visually separated.
            print("")
            monitor()
        # Wait before the next monitoring pass.
        time.sleep(INTERVAL_SEND_MONITOR)
Exemplo n.º 12
0
def thread_monitor_and_config(event_object):
    """Worker loop: configure, manage the modem's network, monitor, and
    update geolocation, forever.

    Uses the module-global ``modem``; each pass holds the shared ``lock``
    and then waits on ``event_object`` for the configured interval
    (waking early if the event is set).
    """
    global modem
    while True:
        with lock:
            logger.debug("Configurator is working...")
            configure()
            logger.debug("Network manager is working...")
            network = manage_network(modem)
            logger.debug("Monitor is working...")
            monitor(modem, network)
            logger.debug("Geolocation is working...")
            update_geolocation(modem)
        event_object.wait(conf.get_send_monitoring_data_interval_config())
Exemplo n.º 13
0
 def read_file_input(self):
     """Read reverse-engine input files, one monitor per file.

     File layout: line 1 "x,y" position, line 2 "height[,sigma]", then
     "tick,value" samples.  Populates self.init_src, self.met,
     self.monitor_count and appends a monitor per file to self.monitors.
     """
     # Read inputs (source and meteorology settings).
     self.init_src = input_info.source_info(None, (0, 0), datetime.datetime.now(), False)
     self.met = input_info.met_info(self.init_src, mode=global_settings.MET_FORMAT, dataset=global_settings.METFILE, test=bool(global_settings.METTEST)).get_met()
     # Read reverse settings.
     filepath = global_settings.REVERSE_FILES
     if len(filepath) == 0:
         raise Error("[Reverse Engine] ERROR: No input file")
     self.monitor_count = len(filepath)
     # Track the largest observed value and its tick (not used after the
     # loop — kept for parity with the original implementation).
     maxvalue = -1
     maxtick = None
     for f in filepath:
         # "with" closes the file even when parsing raises; the original
         # fp.close() was skipped on exceptions, leaking the handle.
         with open(f) as fp:
             str_pos = fp.readline().strip().split(',')
             pos_x, pos_y = int(float(str_pos[0])), int(float(str_pos[1]))
             str_height = fp.readline().strip().split(',')
             height, sigma = float(str_height[0]), (0.0 if len(str_height) == 1 else float(str_height[1]))
             m = monitor((pos_x, pos_y, height))
             for line in fp.readlines():
                 li = line.strip().split(',')
                 ti, va = int(li[0]), float(li[1])
                 if maxvalue < va:
                     maxvalue = va
                     maxtick = ti
                 # In IGNORE mode, values <= 1.0e-5 are filtered out.
                 if self.ignore_low_value and va <= 1.0e-5:
                     continue
                 m.record[ti] = va
         self.monitors.append(m)
Exemplo n.º 14
0
    def __init__(self, app):
        """Wire up the TEC-1 emulator: devices, CPU, monitor, CLI menu,
        and the pygame video window."""
        self.app = app
        self.display = display()
        self.keyboard = keyboard()
        self.mem = memmap()
        # IO layer bridges the CPU to the display and keyboard devices.
        self.io = io(self.display, self.keyboard)
        self.cpu = z80.cpu(self.mem, self.io)
        self.mon = monitor.monitor(self.cpu)
        # Menu entries: (command, help text, help function, handler, submenu)
        # — presumably the tuple shape app.cli expects; confirm there.
        self.menu_root = (
            ('..', 'return to main menu', util.cr, self.parent_menu, None),
            ('da', 'disassemble memory', monitor._help_disassemble, self.mon.cli_disassemble, None),
            ('exit', 'exit the application', util.cr, self.exit, None),
            ('help', 'display general help', util.cr, app.general_help, None),
            ('memory', 'memory functions', None, None, self.mon.menu_memory),
            ('regs', 'display cpu registers', util.cr, self.mon.cli_registers, None),
            ('run', 'run the emulation', util.cr, self.cli_run, None),
            ('step', 'single step the emulation', util.cr, self.cli_step, None),
        )

        # setup the video window
        pygame.init()
        self.screen = pygame.display.set_mode((_screen_x, _screen_y))
        pygame.display.set_caption('Talking Electronics Computer TEC 1')
        self.display.refresh(self.screen)

        # Pump pygame events while the CLI waits for input.
        app.cli.set_poll(pygame.event.pump)
        app.cli.set_root(self.menu_root)
        self.app.cli.set_prompt('\ntec1> ')
Exemplo n.º 15
0
def handleJava(codeFilePath, javaMainClass, inputFile, outputFile, CEFile,
               REFile, timeLimit, memoryLimit):
    """Compile and run a submitted Java source under time/memory limits.

    inputFile/outputFile/CEFile/REFile are open file objects wired to the
    child processes' stdin/stdout/stderr (compile errors go to CEFile,
    runtime errors to REFile).

    Returns (status, timeCost, memoryCost, returncode); if the JVM fails
    to start, returns ('', 0, 0, 0).
    """
    codeFileName = codeFilePath.split('/')[-1]
    if os.path.exists(codeFileName):
        # Rename to Main.java in order to prevent Compile Error
        renameProcess = subprocess.Popen(
            ['rename', codeFileName, f'{javaMainClass}.java', codeFileName])
        renameProcess.wait()
        # print(os.listdir(os.getcwd()))
        # os.system(f'rename {codeFileName} {javaMainClass}.java {codeFileName}')

    # os.system(f'javac {javaMainClass}.java 2>> {CEFile}')
    # os.system(f"java {javaMainClass} < {inputFile} 1>> {outputFile} 2>> {REFile}")

    # Compile; diagnostics are captured in CEFile.
    compileProcess = subprocess.Popen(['javac', f'{javaMainClass}.java'],
                                      stderr=CEFile)
    compileProcess.wait()
    try:
        execProcess = subprocess.Popen(['java', javaMainClass],
                                       stdin=inputFile,
                                       stdout=outputFile,
                                       stderr=REFile)
    except IOError:
        return '', 0, 0, 0
    else:
        print(f'execProcess\'s pid:{execProcess.pid}')

        # Watchdog: enforces the limits and reports resource usage.
        status, timeCost, memoryCost = monitor.monitor(process=execProcess,
                                                       timeLimit=timeLimit,
                                                       memoryLimit=memoryLimit)

        return status, timeCost, memoryCost, execProcess.returncode
Exemplo n.º 16
0
def handleCpp(codeFilePath, compileOutput, inputFile, outputFile, CEFile,
              REFile, timeLimit, memoryLimit):
    """Compile and run a submitted C++ source under time/memory limits.

    inputFile/outputFile/CEFile/REFile are open file objects wired to the
    child processes' stdin/stdout/stderr (compile errors go to CEFile,
    runtime errors to REFile).

    Returns (status, timeCost, memoryCost, returncode); if the binary
    fails to start, returns ('', 0, 0, 0).
    """
    # Ignore all warnings during compiling
    # os.system(f"g++ {codeFilePath} -w -o {compileOutput} 2>> {CEFile}")
    # os.system(f"./{compileOutput} < {inputFile} 1>> {outputFile} 2>> {REFile}")

    compileProcess = subprocess.Popen(
        ['g++', codeFilePath, '-w', '-o', compileOutput], stderr=CEFile)
    compileProcess.wait()

    try:
        execProcess = subprocess.Popen([f'./{compileOutput}'],
                                       stdin=inputFile,
                                       stdout=outputFile,
                                       stderr=REFile)
    except IOError:
        return '', 0, 0, 0
    else:
        print(f'execProcess\'s pid:{execProcess.pid}')
        # Watchdog: enforces the limits and reports resource usage.
        status, timeCost, memoryCost = monitor.monitor(
            process=execProcess,
            timeLimit=timeLimit,
            memoryLimit=memoryLimit,
        )
        print(execProcess.returncode)

        return status, timeCost, memoryCost, execProcess.returncode
Exemplo n.º 17
0
def handler():
    """
        Monitor a website and upload analytics to a database.
    """
    # Feed the monitoring result straight to the producer.
    producer(monitor())

    return create_http_response(200, "Success!")
Exemplo n.º 18
0
def subcmd_monitor(args):
    """'monitor' subcommand: supervise a geo-replication session.

    Builds the local (GLUSTER) endpoint from args.master and the remote
    (SSH) endpoint from args.slave ("host::volume"), then hands both to
    monitor.monitor().  Daemonizes unless --debug is set.
    """
    import monitor
    # NOTE(review): 'resource' here is a project module that shadows the
    # stdlib resource module — confirm against the package layout.
    from resource import GLUSTER, SSH, Popen
    go_daemon = False if args.debug else True

    monitor.startup(go_daemon)
    Popen.init_errhandler()
    local = GLUSTER("localhost", args.master)
    slavehost, slavevol = args.slave.split("::")
    remote = SSH(slavehost, slavevol)
    return monitor.monitor(local, remote)
Exemplo n.º 19
0
 def __init__(self, n, filenames, gui=None):
     """Set up an n-part job: per-part files, progress state, and a
     monitor whose progress signal drives updateProgress.

     n         -- number of parts/nodes
     filenames -- iterable of per-part file names
     gui       -- optional GUI object; when given, its Pause button is
                  wired to pauseMe
     """
     self.nodes = n
     self.partfile = list(filenames)
     # Per-part progress counters, all starting at 0.
     self.completed = [0 for x in range(n)]
     self.threads = []
     self.parts = []
     self.gui = gui
     # Second argument tells the monitor whether a GUI is attached.
     self.monitor = monitor(self, (self.gui != None))
     self.monitor.progressChange.connect(self.updateProgress)
     if (self.gui != None):
         self.gui.ui.Pause.clicked.connect(self.pauseMe)
Exemplo n.º 20
0
def subcmd_monitor(args):
    """Supervise a geo-replication session (the 'monitor' subcommand).

    Constructs the local GLUSTER endpoint from args.master and the
    remote SSH endpoint from args.slave (formatted "host::volume"),
    then delegates to monitor.monitor().  Runs as a daemon unless
    --debug was given.
    """
    import monitor
    # NOTE(review): 'resource' is a project module shadowing the stdlib
    # resource module — confirm against the package layout.
    from resource import GLUSTER, SSH, Popen
    go_daemon = False if args.debug else True

    monitor.startup(go_daemon)
    Popen.init_errhandler()
    local = GLUSTER("localhost", args.master)
    slavehost, slavevol = args.slave.split("::")
    remote = SSH(slavehost, slavevol)
    return monitor.monitor(local, remote)
def wrap_dqn(env):
    """Apply a common set of wrappers for Atari games.

    Returns the wrapped environment.  Wrapper order matters here and is
    preserved exactly; see each wrapper's definition for its semantics.
    """
    # Requires the raw (non-frameskipped) ALE environment variant.
    assert 'NoFrameskip' in env.spec.id
    env = EpisodicLifeEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    env = monitor(env)
    env = ClippedRewardsWrapper(env)
    # Frame-skip of 4 applied here rather than by the base env.
    env = MaxAndSkipEnv(env, skip=4)
    # Some games need FIRE pressed after reset to actually start.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ProcessFrame84(env)
    env = FrameStack(env, 4)
    return env
Exemplo n.º 22
0
def NAG(network, training_data, epochs, mini_batch_size, eta,
       momentum=0.9, lmbda=0.0,
       evaluation_data=None,
       monitor_evaluation_cost=False,
       monitor_evaluation_accuracy=False,
       monitor_training_cost=False,
       monitor_training_accuracy=False):
    n = len(training_data)

    veloc_b = [np.zeros(b.shape) for b in network.biases]
    veloc_w = [np.zeros(w.shape) for w in network.weights]

    evaluation_cost, evaluation_accuracy = [], []
    training_cost, training_accuracy = [], []

    for j in xrange(epochs):
        random.shuffle(training_data)
        mini_batches = [
            training_data[k:k + mini_batch_size]
            for k in xrange(0, n, mini_batch_size)
            ]

        print "epochs[%d]" % j

        for mini_batch in mini_batches:
            veloc_b, veloc_w = update_mini_batch(
                network, mini_batch, eta,
                momentum, lmbda, n,
                veloc_b, veloc_w)

        monitor(network, training_data, evaluation_data,
                training_cost, training_accuracy, evaluation_cost, evaluation_accuracy,
                lmbda,
                monitor_evaluation_cost, monitor_evaluation_accuracy,
                monitor_training_cost, monitor_training_accuracy)

    return training_cost, training_accuracy, evaluation_cost, evaluation_accuracy
    pass
Exemplo n.º 23
0
def parse_monitors():
    """Query bspwm for its monitors and return them sorted left-to-right.

    Parses each top-level line of ``bspc query -T`` output, shaped like
    ``<name> <width>x<height>+<xoffset>+<yoffset>``; monitor names may
    contain spaces.  Returns a list of monitor objects.
    """
    # Raw string fix: '\d' / '\+' in a plain literal are invalid escape
    # sequences (DeprecationWarning since Python 3.6).  Compiled once,
    # outside the loop.
    dims_re = re.compile(r'(.+) (\d+)x(\d+)\+(\d+)\+(\d+)')
    toReturn = []
    for line in subprocess.check_output(["bspc", "query", "-T"],
                                        universal_newlines=True).split('\n')[:-1]:
        # Monitor lines are unindented; desktop/node lines are tab-indented.
        if line[0] != '\t':
            info = dims_re.search(line)
            toReturn.append(monitor(*info.groups()))

    # Sort them left to right.
    toReturn.sort(key=lambda mon: mon.xoffset)
    # lemonbar's -g yoffset appears to be relative to the current monitor, not xrootwin.
    return toReturn
Exemplo n.º 24
0
 def run(self,mode):
     """Validate settings, then start the website file monitor.

     Exits via sys.exit(0) when required settings are missing or when
     backup is disabled; otherwise constructs monitor.monitor with the
     configured website path, mode, exclude path, and file suffix and
     runs it.
     """
     print "Settings display:"
     self.check()
     if self.excFilePath == '' or self.fileSuffix == '' or self.websitePath == '':
         # ANSI escape sequence renders the warning in red.
         print "\033[0;31;40mSetting is not complete!\033[0m"
         sys.exit(0)
     # backup file option
     if self.backup == 'false':
         print "\033[0;31;40mWebsite files is not backup!\033[0m"
         sys.exit(0)
     # monitor files start
     #try:
     self.monitor=monitor.monitor(self.websitePath,mode,self.excFilePath,self.fileSuffix)
     self.monitor.run()
Exemplo n.º 25
0
def parse_monitors():
    """Return bspwm's monitors, sorted left-to-right by x offset.

    Each top-level (non-tab-indented) line of ``bspc query -T`` looks
    like ``<name> <width>x<height>+<xoffset>+<yoffset>``; names may
    contain spaces.
    """
    # Raw string fix: '\d' / '\+' in a plain literal are invalid escape
    # sequences (DeprecationWarning since Python 3.6).  Compiled once.
    dims_re = re.compile(r'(.+) (\d+)x(\d+)\+(\d+)\+(\d+)')
    toReturn = []
    for line in subprocess.check_output(
        ["bspc", "query", "-T"], universal_newlines=True).split('\n')[:-1]:
        # Monitor lines are unindented; desktop/node lines are tab-indented.
        if line[0] != '\t':
            info = dims_re.search(line)
            toReturn.append(monitor(*info.groups()))

    # Sort them left to right.
    toReturn.sort(key=lambda mon: mon.xoffset)
    # lemonbar's -g yoffset appears to be relative to the current monitor, not xrootwin.
    return toReturn
Exemplo n.º 26
0
 def run(self, mode):
     """Check the configuration and launch the website file monitor.

     Calls sys.exit(0) when required settings are missing or when the
     backup option is off; otherwise builds monitor.monitor from the
     configured paths/suffix and runs it.
     """
     print "Settings display:"
     self.check()
     if self.excFilePath == '' or self.fileSuffix == '' or self.websitePath == '':
         # ANSI escape sequence renders the warning in red.
         print "\033[0;31;40mSetting is not complete!\033[0m"
         sys.exit(0)
     # backup file option
     if self.backup == 'false':
         print "\033[0;31;40mWebsite files is not backup!\033[0m"
         sys.exit(0)
     # monitor files start
     #try:
     self.monitor = monitor.monitor(self.websitePath, mode,
                                    self.excFilePath, self.fileSuffix)
     self.monitor.run()
Exemplo n.º 27
0
def handlePy3(codeFilePath, inputFile, outputFile, REFile, timeLimit,
              memoryLimit):
    """Run a submitted Python source under time/memory limits.

    inputFile/outputFile/REFile are open file objects wired to the
    child's stdin/stdout/stderr (runtime errors go to REFile).

    Returns (status, timeCost, memoryCost, returncode) as reported by
    monitor.monitor and the child process.
    """
    # os.system(f"python {codeFilePath} < {inputFile} 1>> {outputFile} 2>> {REFile}")
    execProcess = subprocess.Popen(['python', codeFilePath],
                                   stdin=inputFile,
                                   stdout=outputFile,
                                   stderr=REFile)
    print(f'execProcess\'s pid:{execProcess.pid}')

    # Watchdog: enforces the limits and reports resource usage.
    status, timeCost, memoryCost = monitor.monitor(
        process=execProcess,
        timeLimit=timeLimit,
        memoryLimit=memoryLimit,
    )

    return status, timeCost, memoryCost, execProcess.returncode
Exemplo n.º 28
0
async def main():
    """Start the tracking web server together with its background tasks.

    Serves the open/click tracking endpoints and runs them concurrently
    with the flush and monitor loops via asyncio.gather; runs until one
    of the gathered coroutines finishes or raises.
    """
    # Setup the site
    app = web.Application()
    app.add_routes([
        web.get(OPEN_TRACKING_URL, email_open),
        web.get(CLICK_TRACKING_URL, email_click),
    ])
    runner = web.AppRunner(app)
    await runner.setup()
    # No host/port given — aiohttp's TCPSite defaults apply.
    site = web.TCPSite(runner)

    # Add flushing + monitoring
    await asyncio.gather(
        site.start(),
        flush_opens(),
        flush_clicks(),
        monitor(),
    )
Exemplo n.º 29
0
def main():
    """Check a watched URL for changes by comparing content hashes.

    Loads config (URL, bot credentials), computes the page's current
    hash, compares it with the hash stored in prev_hash.txt, sends a
    "File changed" message when it differs, and persists the new hash.
    On the first run (no hash file) it creates an empty one.
    """
    data = load_json()
    url = data.get("url")
    telegram_bot = bot(data.get("bot_key"), data.get("chat_id"))

    try:
        # "r+" lets us read the previous hash and rewrite it in place;
        # the with-block closes the file (the original also called
        # close() redundantly inside the block).
        with open("prev_hash.txt", "r+") as file:
            prev_hash = file.read()

            my_monitor = monitor(url, telegram_bot)
            curr_hash = my_monitor.get_hash()
            if prev_hash != curr_hash:
                my_monitor.send_message("File changed")

            # Overwrite the stored hash; truncate() drops leftover bytes
            # in case the new hash is shorter than the old one.
            file.seek(0)
            file.write(curr_hash)
            file.truncate()
    except FileNotFoundError:
        # First run: create an empty hash file for next time.
        open('prev_hash.txt', 'x').close()
        print("Previous hash not found.")
Exemplo n.º 30
0
    def __init__(self, app):
        """Wire up the Jupiter ACE emulator: devices, CPU, monitor, CLI
        menu, memory/video hooks, and the pygame video window."""
        self.app = app
        self.video = video()
        self.keyboard = keyboard()
        self.mem = memmap()
        self.io = io()
        self.cpu = z80.cpu(self.mem, self.io)
        self.mon = monitor.monitor(self.cpu)
        # Menu entries: (command, help text, help function, handler, submenu)
        # — presumably the tuple shape app.cli expects; confirm there.
        self.menu_root = (
            ('..', 'return to main menu', util.cr, self.parent_menu, None),
            ('char', 'display the character memory', util.cr, self.cli_char, None),
            ('da', 'disassemble memory', monitor._help_disassemble, self.mon.cli_disassemble, None),
            ('exit', 'exit the application', util.cr, self.exit, None),
            ('help', 'display general help', util.cr, app.general_help, None),
            ('memory', 'memory functions', None, None, self.mon.menu_memory),
            ('regs', 'display cpu registers', util.cr, self.mon.cli_registers, None),
            ('run', 'run the emulation', util.cr, self.cli_run, None),
            ('step', 'single step the emulation', util.cr, self.cli_step, None),
        )

        # create the hooks between video and memory
        self.mem.char.wr_notify = self.video.char_wr
        self.mem.video.wr_notify = self.video.video_wr
        self.video.mem = self.mem
        self.video.cmem = self.mem.char.rd

        # create the hooks between io and keyboard
        self.io.keyboard = self.keyboard.rd

        # setup the video window
        pygame.init()
        self.screen = pygame.display.set_mode((_screen_x, _screen_y))
        pygame.display.set_caption('Jupiter ACE')
        self.video.refresh(self.screen)

        # Pump pygame events while the CLI waits for input.
        app.cli.set_poll(pygame.event.pump)
        app.cli.set_root(self.menu_root)
        self.app.cli.set_prompt('\njace> ')
Exemplo n.º 31
0
 def read_file_input(self):
     """Read reverse-engine input files (one monitor per file).

     File layout: line 1 "x,y" position, line 2 "height[,sigma]", then
     "tick,value" samples.  Populates self.init_src, self.met,
     self.monitor_count and appends a monitor per file to self.monitors.
     """
     #Read Inputs
     self.init_src = input_info.source_info(None, (0, 0),
                                            datetime.datetime.now(), False)
     self.met = input_info.met_info(self.init_src,
                                    mode=global_settings.MET_FORMAT,
                                    dataset=global_settings.METFILE,
                                    test=bool(
                                        global_settings.METTEST)).get_met()
     #Read Reverse settings
     filepath = global_settings.REVERSE_FILES
     if len(filepath) == 0:
         raise Error("[Reverse Engine] ERROR: No input file")
     self.monitor_count = len(filepath)
     #Read all files, one file for one monitor
     # Largest observed value and its tick (unused after the loop — kept
     # for parity with the original implementation).
     maxvalue = -1
     maxtick = None
     for f in filepath:
         # "with" closes the file even when parsing raises; the original
         # fp.close() was skipped on exceptions, leaking the handle.
         with open(f) as fp:
             str_pos = fp.readline().strip().split(',')
             pos_x, pos_y = int(float(str_pos[0])), int(float(str_pos[1]))
             str_height = fp.readline().strip().split(',')
             height, sigma = float(str_height[0]), (0.0 if len(str_height) == 1
                                                    else float(str_height[1]))
             m = monitor((pos_x, pos_y, height))
             for line in fp.readlines():
                 li = line.strip().split(',')
                 ti, va = int(li[0]), float(li[1])
                 if maxvalue < va:
                     maxvalue = va
                     maxtick = ti
                 #If we open IGNORE MODE, all values less than 1.0e-5 will be filtered.
                 if self.ignore_low_value and va <= 1.0e-5:
                     continue
                 m.record[ti] = va
         self.monitors.append(m)
Exemplo n.º 32
0
def main(command):
    """Shell-quote *command* into a service string and run the command
    inside the `monitor` context."""
    quoted_parts = [pipes.quote(part) for part in command]
    with monitor(' '.join(quoted_parts)):
        check_call(command)
Exemplo n.º 33
0
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    # rconf collects "runtime-only" settings that must not be persisted
    # into the config file; they are diverted here by the store_local*
    # callbacks instead of landing in the parser's values.
    rconf = {'go_daemon': 'should'}

    def store_abs(opt, optstr, val, parser):
        # Normalize a path option to an absolute path; '-' (stdout/stdin
        # convention) is passed through untouched.
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    def store_local(opt, optstr, val, parser):
        # Divert the option into rconf rather than parser.values.
        rconf[opt.dest] = val

    def store_local_curry(val):
        # store_local with a fixed value -- used for flag-style options.
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    def store_local_obj(op, dmake):
        # store_local wrapping the value in a FreeObject tagged with the
        # config operation ('check'/'get'/'set'/'del').
        return lambda o, oo, vx, p: store_local(
            o, oo, FreeObject(op=op, **dmake(vx)), p)

    # ---- option table ----------------------------------------------------
    op = OptionParser(
        usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR', default='')
    op.add_option('--gluster-log-file', metavar='LOGF',
                  default=os.devnull, type=str, action='callback',
                  callback=store_abs)
    op.add_option('--gluster-log-level', metavar='LVL')
    op.add_option('--gluster-params', metavar='PRMS', default='')
    op.add_option(
        '--glusterd-uuid', metavar='UUID', type=str, default='',
        help=SUPPRESS_HELP)
    op.add_option(
        '--gluster-cli-options', metavar='OPTS', default='--log-file=-')
    op.add_option('--mountbroker', metavar='LABEL')
    op.add_option('-p', '--pid-file', metavar='PIDF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('-l', '--log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--iprefix',  metavar='LOGD',  type=str,
                  action='callback', callback=store_abs)
    op.add_option('--changelog-log-file',  metavar='LOGF',  type=str,
                  action='callback', callback=store_abs)
    op.add_option('--log-file-mbr', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-detail-file', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--georep-session-working-dir', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes', default=False, action='store_true')
    op.add_option('--isolated-slave', default=False, action='store_true')
    op.add_option('--use-rsync-xattrs', default=False, action='store_true')
    op.add_option('--pause-on-start', default=False, action='store_true')
    op.add_option('-L', '--log-level', metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',
                  default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id', metavar='UUID')
    op.add_option('--slave-id', metavar='ID')
    op.add_option('--session-owner', metavar='ID')
    op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='')
    op.add_option(
        '--local-path', metavar='PATH', help=SUPPRESS_HELP, default='')
    op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
    op.add_option('--ssh-command-tar', metavar='CMD', default='ssh')
    op.add_option('--rsync-command', metavar='CMD', default='rsync')
    op.add_option('--rsync-options', metavar='OPTS', default='')
    op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress')
    op.add_option('--timeout', metavar='SEC', type=int, default=120)
    op.add_option('--connection-timeout', metavar='SEC',
                  type=int, default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs', metavar='N', type=int, default=3)
    op.add_option('--replica-failover-interval', metavar='N',
                  type=int, default=1)
    op.add_option('--changelog-archive-format', metavar='N',
                  type=str, default="%Y%m")
    op.add_option(
        '--turns', metavar='N', type=int, default=0, help=SUPPRESS_HELP)
    op.add_option('--allow-network', metavar='IPS', default='')
    op.add_option('--socketdir', metavar='DIR')
    op.add_option('--state-socket-unencoded', metavar='SOCKF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--checkpoint', metavar='LABEL', default='')

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option(
        '--change-detector', metavar='MODE', type=str, default='xtime')
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option('--change-interval', metavar='SEC', type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option('--working-dir', metavar='DIR', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--use-tarssh', default=False, action='store_true')

    op.add_option('-c', '--config-file', metavar='CONF',
                  type=str, action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--agent', dest='agent', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--resource-local', dest='resource_local',
                  type=str, action='callback', callback=store_local)
    op.add_option('--resource-remote', dest='resource_remote',
                  type=str, action='callback', callback=store_local)
    op.add_option('--feedback-fd', dest='feedback_fd', type=int,
                  help=SUPPRESS_HELP, action='callback', callback=store_local)
    op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,
                  action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",
                  action='callback', callback=store_local_curry('dont'))
    op.add_option('--verify', type=str, dest="verify",
                  action='callback', callback=store_local)
    op.add_option('--create', type=str, dest="create",
                  action='callback', callback=store_local)
    op.add_option('--delete', dest='delete', action='callback',
                  callback=store_local_curry(True))
    # --debug implies foreground execution with everything logged to
    # stdout at DEBUG level.
    op.add_option('--debug', dest="go_daemon", action='callback',
                  callback=lambda *a: (store_local_curry('dont')(*a),
                                       setattr(
                                           a[-1].values, 'log_file', '-'),
                                       setattr(a[-1].values, 'log_level',
                                               'DEBUG'),
                                       setattr(a[-1].values,
                                               'changelog_log_file', '-')))
    op.add_option('--path', type=str, action='append')

    for a in ('check', 'get'):
        op.add_option('--config-' + a, metavar='OPT', type=str, dest='config',
                      action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback',
                  callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m, metavar='OPT VAL', type=str,
                          nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {
                              'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m, metavar='OPT', type=str,
                          dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {
                              'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon_esc'))

    # Option names that may be read/written through the config interface.
    # NOTE(review): 'store_true' here is a *string* compared against
    # callback functions, so it never matches anything; store_true options
    # are already admitted via the None case -- confirm original intent.
    tunables = [norm(o.get_opt_string()[2:])
                for o in op.option_list
                if (o.callback in (store_abs, 'store_true', None) and
                    o.get_opt_string() not in ('--version', '--help'))]
    # Tunables a restricted (SSH-invoked) peer is allowed to pass to us.
    remote_tunables = ['listen', 'go_daemon', 'timeout',
                       'session_owner', 'config_file', 'use_rsync_xattrs']
    rq_remote_tunables = {'listen': True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    args_orig = args[:]
    # --resource-local/--resource-remote override the positional
    # master/slave arguments.
    r = rconf.get('resource_local')
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get('resource_remote')
    if r:
        if len(args) == 0:
            raise GsyncdError('local resource unspecfied')
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get('config')
    if not (len(args) == 2 or
            (len(args) == 1 and rconf.get('listen')) or
            (len(args) <= 2 and confdata) or
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    # --verify: just prove the interpreter can spawn this script.
    verify = rconf.get('verify')
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    # When invoked over restricted SSH (slave side), refuse any tunable
    # outside the whitelist and require the mandatory ones.
    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with '
                              'restricted SSH invocaton: ' +
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required '
                                  'for restricted SSH invocaton' %
                                  (k, v))

    confrx = getattr(confdata, 'rx', None)

    def makersc(aa, check=True):
        # Parse positional args into (resources, local, remote); optionally
        # verify the local resource can talk to the remote one.
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" %
                              (local.path, remote and remote.path))
        return (ra, local, remote)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = ['\A' + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get('url_print')
        rscs, local, remote = makersc(args_orig, not dc)
        if dc:
            # URL-formatting mode: print each resource in the requested
            # normalization and bail out.
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True,
                                                 'escaped': True}}[dc]))
            return
        # pa collects each resource's URL in the three normalizations:
        # plain, canonical, canonical+escaped.
        pa = ([], [], [])
        urlprms = (
            {}, {'canonical': True}, {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring
        # to / repreenting peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[
                0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
        else:
            rmap = {local: ('local', 'slave')}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + 'vol'] = x.volume
                if name == 'remote':
                    namedict['remotehost'] = x.remotehost
    if not 'config_file' in rconf:
        rconf['config_file'] = os.path.join(
            os.path.dirname(sys.argv[0]), "conf/gsyncd_template.conf")

    # Load (and if needed migrate) the config file for this peer pair.
    upgrade_config_file(rconf['config_file'])
    gcnf = GConffile(
        rconf['config_file'], canon_peers,
        defaults.__dict__, opts.__dict__, namedict)

    # Pure config query/modification invocation: perform the requested
    # operation and return -- unless it changed the checkpoint, in which
    # case we fall through to set up logging for it.
    checkpoint_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if its just config invocation
        if confdata.opt == 'checkpoint' and confdata.op in ('set', 'del') and \
           not confdata.rx:
            checkpoint_change = True
        if not checkpoint_change:
            return

    # Merge values into the global gconf with the documented precedence:
    # defaults first, then config file, then explicit command line.
    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    delete = rconf.get('delete')
    if delete:
        logging.info('geo-replication delete')
        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, 'pid_file', None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, 'state_file', None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, 'state_detail_file', None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, 'state_socket_unencoded', None):
            cleanup_paths.append(gconf.state_socket_unencoded)

        # NOTE(review): assumes the config file name ends with an 11-char
        # suffix (presumably 'gsyncd.conf') -- confirm before changing
        # config file naming.
        cleanup_paths.append(rconf['config_file'][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, 'working_dir', None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError):
                if sys.exc_info()[1].errno == ENOENT:
                    pass
                else:
                    raise GsyncdError(
                        'Error while removing working dir: %s' %
                        gconf.working_dir)

        for path in cleanup_paths:
            # To delete temp files
            for f in glob.glob(path + "*"):
                _unlink(f)
        return

    # Restricted invocations may be limited to specific client networks.
    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [IPNetwork(a)
                                for a in gconf.allow_network.split(',')]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    # Mark the monitor feedback fd close-on-exec so children don't hold it.
    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    # Deferred logging for a checkpoint-changing config invocation.
    if checkpoint_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
            if confdata.op == 'set':
                logging.info('checkpoint %s set' % confdata.val)
                gcnf.delete('checkpoint_completed')
                gcnf.delete('checkpoint_target')
            elif confdata.op == 'del':
                logging.info('checkpoint info was reset')
                # if it is removing 'checkpoint' then we need
                # to remove 'checkpoint_completed' and 'checkpoint_target' too
                gcnf.delete('checkpoint_completed')
                gcnf.delete('checkpoint_target')

        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    # --create: just record the given state string and return.
    create = rconf.get('create')
    if create:
        if getattr(gconf, 'state_file', None):
            update_file(gconf.state_file, lambda f: f.write(create + '\n'))
        return

    # ---- service dispatch: decide role, daemonize, run ----
    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')
    be_agent = rconf.get('agent')

    rscs, local, remote = makersc(args)
    # Master talking to an SSH slave daemonizes only after the remote
    # connection is up ('postconn'), so connect errors reach the caller.
    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif be_agent:
        label = 'agent'
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    resource.Popen.init_errhandler()

    if be_agent:
        os.setsid()
        logging.debug('rpc_fd: %s' % repr(gconf.rpc_fd))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    # Default role: master (remote set) or slave -- run the sync loop.
    logging.info("syncing: %s" % " -> ".join(r.url for r in rscs))
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
Exemplo n.º 34
0
from monitor import monitor

def file_change_callback(filename, op):
    print "recompiling {0}".format(filename)
    with open(min_filename(filename), 'w') as f:
        f.write(_compile(filename))

def min_filename(filename):
    """Return the minified-output filename for *filename*.

    Only a trailing '.js' extension is rewritten to '.r0.min.js';
    names without that suffix are returned unchanged. (The original
    used str.replace, which also mangled names that merely contain
    '.js', e.g. 'config.json' -> 'config.r0.min.json'.)
    """
    suffix = '.js'
    if filename.endswith(suffix):
        return filename[:-len(suffix)] + '.r0.min.js'
    return filename

def _compile(js_file):
    """Compile *js_file* with a local Closure compiler jar.

    The CLOSURE_COMPILER environment variable must point at the
    compiler jar. Compiler stderr (warnings/errors) is forwarded to
    our stderr; the minified source from stdout is returned with
    trailing whitespace stripped.
    """

    compiler = os.getenv('CLOSURE_COMPILER')
    command = ['java', '-jar', compiler, '--compilation_level',
               'SIMPLE_OPTIMIZATIONS', '--js', js_file]
    p = Popen(command, stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes concurrently. The original read
    # stderr to EOF and then stdout, which deadlocks once the stdout
    # pipe's OS buffer fills while we are blocked on stderr.
    stdout, stderr = p.communicate()
    if stderr:
        sys.stderr.write(stderr)

    return stdout.rstrip()

if __name__ == "__main__":
    # CLI entry point: require at least one JS filename to watch.
    if len(sys.argv) < 2:
        print "Usage: python reminify.py <filename> [<filenames>]"
        sys.exit(1)

    # Watch the given files and re-minify each on change -- presumably
    # monitor() blocks and fires the callback per fs event; confirm
    # against the monitor module.
    monitor(sys.argv[1:], file_change_callback)
Exemplo n.º 35
0
def processMode():
    """Dispatch on the global options.mode.

    'generate' -- build fuzzed variants of options.samplefile into
    options.fuzzfolder using the selected technique.
    'fuzz'     -- run options.executable against every file in
    options.fuzzfolder, monitoring and optionally logging crashes.
    Any validation failure prints a message and exits the process.
    """
    if options.mode == "generate":
        if not options.samplefile:
            print "(!) Please specify a sample file"
            sys.exit()
        # create a new filefuzzer with our sample file
        fuzzer = fileFuzz(options.samplefile)
        fuzzer.setTechniqueToUse(options.technique)
        fuzzer.setModeToUse(options.mode)
        if options.fuzzfolder:
            fuzzer.setOutputFolder(options.fuzzfolder)
        else:
            print "(-) Please supply a fuzz folder to store fuzzed files"
            sys.exit()

        # if the file is real and has an extension..
        if fuzzer.isValidFile() and fuzzer.setAndCheckExtension():
            # no switch in python, checking for the techniques
            if options.technique == "byteflip":
                print ("(+) Fuzz generation based on byte flipping")
                print ("(+) Using a %s as the size" % (fuzzer.getFuzzLength()))
                print ("(+) Byte %s is set as the flip byte" % (fuzzer.getFuzzbyte()))
                print ("(+) Fuzzing between size offsets %s and %s" % (fuzzer.fuzzstart, fuzzer.fuzzend))
                generateModeConfirmationAndProceed(fuzzer)

            elif options.technique == "buffersmash":
                print ("(+) Fuzz generation based on buffer smashing")
                print ("(+) Fuzzing between size offsets %s and %s" % (fuzzer.start_buffer_size, fuzzer.end_buffer_size))
                print ("(+) Creating fuzz increments of %s" % (fuzzer.getIncriments()))
                print ("(+) Using a '%s' for the location to fuzz at" % (fuzzer.getLocation()))
                print ("(+) Byte %s is set as the buffer value" % (fuzzer.getFuzzbyte()))
                generateModeConfirmationAndProceed(fuzzer)
            else:
                print "(-) Incorrect technique specified, check your settings"
                sys.exit()
        else:
            print "(-) Invalid sample file, check your path"
            sys.exit()

    elif options.mode == "fuzz":
        if not options.executable:
            print "(!) Please specify an executable to fuzz"
            sys.exit()
        elif not options.fuzzfolder:
            print "(!) Please specify the fuzz folder path"
            sys.exit()
        # create a monitoring object with our executable and fuzz folder of files
        monitorAndLog = monitor(options.executable, options.fuzzfolder)
        if monitorAndLog.isValidFile() and monitorAndLog.isValidExtension():
            if monitorAndLog.isValidOutputFolder():
                confirmSettings = ("(+) Fuzzing file format '%s'\r\n" % (monitorAndLog.getExtension()))
                # NOTE(review): this format has two %s but is fed a single
                # call result -- assumes getCalculatedFuzzTime() returns a
                # (value, unit) 2-tuple, otherwise this raises; TODO confirm.
                confirmSettings += ("(+) Fuzzing time is estimated to be <= %s %s\r\n" % (monitorAndLog.getCalculatedFuzzTime()))
                confirmSettings += ("(+) Fuzzing with %s files\r\n" % (monitorAndLog.numberOfFuzzFiles+1))
                confirmSettings += ("(+) Fuzzing executable %s \r\n" % (options.executable))
                confirmSettings += ("(+) Using arguments '%s' \r\n" % (monitorAndLog.arguments))
                print confirmSettings
                # NOTE(review): ready[0] raises IndexError if the user just
                # presses Enter (empty string).
                ready = raw_input("(+) Are these settings correct? ")
                if ready[0] == "y" or ready[0] == "Y":
                    confirmSettings += ("(+) Fuzzing started at %s\r\n" % (timer()))
                    if options.log:
                        print "(+) Logging into '%s'" % (monitorAndLog.logpath)
                        monitorAndLog.setLoggingOn()
                        monitorAndLog.log.write(banner()+'\n')
                        monitorAndLog.log.write(confirmSettings)
                    monitorAndLog.startFuzzing()
                    print ("(+) Fuzzing completed at %s" % (timer()))
                    if options.log:
                        monitorAndLog.log.close()
                else:
                    print "(!) Reset the configuration file to the appropriate settings"
                    sys.exit()
        else:
            print "(-) Executable doesn't exist on system, check your path"
            sys.exit()
    else:
        print "(-) Incorrect mode specified, check your settings"
        sys.exit()
Exemplo n.º 36
0
def trace(dst):
    """ICMP traceroute to *dst*, one echo probe per TTL.

    Returns a list indexed by hop number: answered hops are dicts with
    'ip' and 'rtt' keys, unanswered hops are None. The list is cut just
    after the first hop that sent an echo reply (type 0), if any.
    """
    probe_id = int(random() * 0xFFFF)  # renamed: don't shadow builtin id()
    probes = []
    for ttl in range(1, MAX_HOPS):
        probes.append(IP(dst=dst, ttl=ttl) / ICMP(id=probe_id, seq=ttl))

    answered, _ = sr(probes, timeout=TIMEOUT, verbose=False, chainCC=True)

    hops = [None] * MAX_HOPS
    final = MAX_HOPS
    for sent, reply in answered:
        hops[sent.ttl - 1] = {'ip': reply.src,
                              'rtt': reply.time - sent.sent_time}
        # type 0 == echo reply: the destination itself answered.
        if reply.type == 0:
            final = min(final, sent.ttl)

    return hops[:final]

if __name__ == "__main__":
	from monitor import monitor 
	from common import dst

	# Adapter for monitor(): ignores its argument, runs one trace to the
	# shared destination and returns (rows, extra) -- presumably the shape
	# monitor() expects for display; confirm against the monitor module.
	def trace_display(_):
		hops = trace(dst)

		# Unanswered hops render as a single '*' cell.
		return [hop and hop.values() or ['*'] for hop in hops], None

	monitor(None, trace_display, None)
Exemplo n.º 37
0
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    # rconf holds "local" settings that must not end up in the parsed
    # option values (they control this invocation, not the config file).
    rconf = {'go_daemon': 'should'}

    # optparse callback: store the option value as an absolute path
    # ('-' is kept verbatim, meaning stdout/stdin by convention).
    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    # optparse callback: store the value in rconf instead of parser.values.
    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val

    # Make a callback that stores a fixed value in rconf.
    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    # Make a callback that stores a FreeObject describing a config
    # operation (op name plus fields derived from the option value).
    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(
            o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(
        usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR', default='')
    op.add_option('--gluster-log-file', metavar='LOGF',
                  default=os.devnull, type=str, action='callback',
                  callback=store_abs)
    op.add_option('--gluster-log-level', metavar='LVL')
    op.add_option('--gluster-params', metavar='PRMS', default='')
    op.add_option(
        '--glusterd-uuid', metavar='UUID', type=str, default='',
        help=SUPPRESS_HELP)
    op.add_option(
        '--gluster-cli-options', metavar='OPTS', default='--log-file=-')
    op.add_option('--mountbroker', metavar='LABEL')
    op.add_option('-p', '--pid-file', metavar='PIDF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('-l', '--log-file', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--iprefix',  metavar='LOGD',  type=str,
                  action='callback', callback=store_abs)
    op.add_option('--changelog-log-file',  metavar='LOGF',  type=str,
                  action='callback', callback=store_abs)
    op.add_option('--log-file-mbr', metavar='LOGF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-file', metavar='STATF', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--state-detail-file', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--georep-session-working-dir', metavar='STATF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes', default=False, action='store_true')
    op.add_option('--isolated-slave', default=False, action='store_true')
    op.add_option('--use-rsync-xattrs', default=False, action='store_true')
    op.add_option('--pause-on-start', default=False, action='store_true')
    op.add_option('-L', '--log-level', metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',
                  default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id', metavar='UUID')
    op.add_option('--slave-id', metavar='ID')
    op.add_option('--session-owner', metavar='ID')
    op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='')
    op.add_option(
        '--local-path', metavar='PATH', help=SUPPRESS_HELP, default='')
    op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
    op.add_option('--ssh-command-tar', metavar='CMD', default='ssh')
    op.add_option('--rsync-command', metavar='CMD', default='rsync')
    op.add_option('--rsync-options', metavar='OPTS', default='')
    op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress')
    op.add_option('--timeout', metavar='SEC', type=int, default=120)
    op.add_option('--connection-timeout', metavar='SEC',
                  type=int, default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs', metavar='N', type=int, default=3)
    op.add_option('--replica-failover-interval', metavar='N',
                  type=int, default=1)
    op.add_option(
        '--turns', metavar='N', type=int, default=0, help=SUPPRESS_HELP)
    op.add_option('--allow-network', metavar='IPS', default='')
    op.add_option('--socketdir', metavar='DIR')
    op.add_option('--state-socket-unencoded', metavar='SOCKF',
                  type=str, action='callback', callback=store_abs)
    op.add_option('--checkpoint', metavar='LABEL', default='')

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option(
        '--change-detector', metavar='MODE', type=str, default='xtime')
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option('--change-interval', metavar='SEC', type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option('--working-dir', metavar='DIR', type=str,
                  action='callback', callback=store_abs)
    op.add_option('--use-tarssh', default=False, action='store_true')

    op.add_option('-c', '--config-file', metavar='CONF',
                  type=str, action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--agent', dest='agent', action='callback',
                  callback=store_local_curry(True))
    op.add_option('--resource-local', dest='resource_local',
                  type=str, action='callback', callback=store_local)
    op.add_option('--resource-remote', dest='resource_remote',
                  type=str, action='callback', callback=store_local)
    op.add_option('--feedback-fd', dest='feedback_fd', type=int,
                  help=SUPPRESS_HELP, action='callback', callback=store_local)
    op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,
                  action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",
                  action='callback', callback=store_local_curry('dont'))
    op.add_option('--verify', type=str, dest="verify",
                  action='callback', callback=store_local)
    op.add_option('--create', type=str, dest="create",
                  action='callback', callback=store_local)
    op.add_option('--delete', dest='delete', action='callback',
                  callback=store_local_curry(True))
    # --debug implies foreground operation with everything logged to
    # stdout at DEBUG level.
    op.add_option('--debug', dest="go_daemon", action='callback',
                  callback=lambda *a: (store_local_curry('dont')(*a),
                                       setattr(
                                           a[-1].values, 'log_file', '-'),
                                       setattr(a[-1].values, 'log_level',
                                               'DEBUG'),
                                       setattr(a[-1].values,
                                               'changelog_log_file', '-')))
    op.add_option('--path', type=str, action='append')

    for a in ('check', 'get'):
        op.add_option('--config-' + a, metavar='OPT', type=str, dest='config',
                      action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback',
                  callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m, metavar='OPT VAL', type=str,
                          nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {
                              'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m, metavar='OPT', type=str,
                          dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {
                              'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print',
                  action='callback', callback=store_local_curry('canon_esc'))

    # The set of option names that are legal config-file tunables.
    # NOTE(review): 'store_true' in this membership test compares a string
    # against the callback object and can never match — looks vestigial
    # (store_true options have callback None anyway); verify intent.
    tunables = [norm(o.get_opt_string()[2:])
                for o in op.option_list
                if (o.callback in (store_abs, 'store_true', None) and
                    o.get_opt_string() not in ('--version', '--help'))]
    # Tunables a restricted (mountbroker/SSH-forced-command) peer may set.
    remote_tunables = ['listen', 'go_daemon', 'timeout',
                       'session_owner', 'config_file', 'use_rsync_xattrs']
    rq_remote_tunables = {'listen': True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    args_orig = args[:]
    # --resource-local / --resource-remote override the positional
    # master/slave arguments.
    r = rconf.get('resource_local')
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get('resource_remote')
    if r:
        if len(args) == 0:
            raise GsyncdError('local resource unspecfied')
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get('config')
    if not (len(args) == 2 or
            (len(args) == 1 and rconf.get('listen')) or
            (len(args) <= 2 and confdata) or
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    # --verify: just prove this gsyncd can be spawned, then quit.
    verify = rconf.get('verify')
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    # Restricted invocation (via SSH forced command): refuse any tunable
    # outside the small whitelist, and require the mandatory ones.
    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with '
                              'restricted SSH invocaton: ' +
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required '
                                  'for restricted SSH invocaton' %
                                  (k, v))

    confrx = getattr(confdata, 'rx', None)

    # Parse url args into resource objects; returns (all, local, remote)
    # and optionally checks the pair can actually replicate.
    def makersc(aa, check=True):
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" %
                              (local.path, remote and remote.path))
        return (ra, local, remote)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = ['\A' + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get('url_print')
        rscs, local, remote = makersc(args_orig, not dc)
        if dc:
            # URL-formatting mode: print each resource in the requested
            # normalization and exit.
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True,
                                                 'escaped': True}}[dc]))
            return
        pa = ([], [], [])
        urlprms = (
            {}, {'canonical': True}, {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring
        # to / repreenting peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[
                0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
        else:
            rmap = {local: ('local', 'slave')}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + 'vol'] = x.volume
                if name == 'remote':
                    namedict['remotehost'] = x.remotehost
    if not 'config_file' in rconf:
        rconf['config_file'] = os.path.join(
            os.path.dirname(sys.argv[0]), "conf/gsyncd_template.conf")

    upgrade_config_file(rconf['config_file'])
    gcnf = GConffile(
        rconf['config_file'], canon_peers,
        defaults.__dict__, opts.__dict__, namedict)

    # Config query/manipulation mode: perform the requested op and return,
    # except that changing 'checkpoint' falls through so it can be logged.
    checkpoint_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if its just config invocation
        if confdata.opt == 'checkpoint' and confdata.op in ('set', 'del') and \
           not confdata.rx:
            checkpoint_change = True
        if not checkpoint_change:
            return

    # Merge values into the global config with the documented precedence:
    # defaults, then config file, then explicit command-line options.
    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    # --delete: tear down the session's on-disk state and return.
    delete = rconf.get('delete')
    if delete:
        logging.info('geo-replication delete')
        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, 'pid_file', None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, 'state_file', None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, 'state_detail_file', None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, 'state_socket_unencoded', None):
            cleanup_paths.append(gconf.state_socket_unencoded)

        cleanup_paths.append(rconf['config_file'][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, 'working_dir', None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError):
                if sys.exc_info()[1].errno == ENOENT:
                    pass
                else:
                    raise GsyncdError(
                        'Error while removing working dir: %s' %
                        gconf.working_dir)

        for path in cleanup_paths:
            # To delete temp files
            for f in glob.glob(path + "*"):
                _unlink(f)
        return

    # Restricted peers may additionally be limited to a set of client
    # networks (checked against the SSH client address).
    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [IPNetwork(a)
                                for a in gconf.allow_network.split(',')]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    # Keep the monitor's feedback pipe from leaking into children.
    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    # Deferred from the config block above: log the checkpoint change and
    # reset the derived checkpoint state, then return.
    if checkpoint_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
            if confdata.op == 'set':
                logging.info('checkpoint %s set' % confdata.val)
                gcnf.delete('checkpoint_completed')
                gcnf.delete('checkpoint_target')
            elif confdata.op == 'del':
                logging.info('checkpoint info was reset')
                # if it is removing 'checkpoint' then we need
                # to remove 'checkpoint_completed' and 'checkpoint_target' too
                gcnf.delete('checkpoint_completed')
                gcnf.delete('checkpoint_target')

        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    # --create: record the given status string in the state file and return.
    create = rconf.get('create')
    if create:
        if getattr(gconf, 'state_file', None):
            update_file(gconf.state_file, lambda f: f.write(create + '\n'))
        return

    # Service mode: decide which role (monitor/agent/master/slave) we run
    # in and how/when to daemonize.
    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')
    be_agent = rconf.get('agent')

    rscs, local, remote = makersc(args)
    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        # master with an SSH slave: daemonize only after the remote
        # connection is established so connect errors stay visible.
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif be_agent:
        label = 'agent'
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    resource.Popen.init_errhandler()

    if be_agent:
        os.setsid()
        logging.debug('rpc_fd: %s' % repr(gconf.rpc_fd))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    # master/slave service loop
    logging.info("syncing: %s" % " -> ".join(r.url for r in rscs))
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
Exemplo n.º 38
0
import sys
import os
dir_module = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_module = os.path.join(dir_module, 'common_module')
sys.path.insert(0, dir_module)

from monitor import monitor, write2file
from attack_signature import target_ip, target_sport, target_prot

infile = 'target_udp_traffic.csv'
files = [infile]
time_interval = 300 # seconds

import time
time_start = time.time()

# Monitor UDP/CHARGEN packets/bytes to target ip per time interval
udp_pkts, udp_byts, chargen_pkts, chargen_byts = [], [], [], []
monitor(files, target_prot, 'ANY', target_ip, time_interval, udp_pkts, udp_byts)
monitor(files, target_prot, target_sport, target_ip, time_interval, chargen_pkts, chargen_byts)

datas = [udp_pkts, udp_byts, chargen_pkts, chargen_byts]
outfiles = ['udp_pkts.txt', 'udp_byts.txt', 'chargen_pkts.txt', 'chargen_byts.txt']
for i in range(len(datas)):
    data, outfile = datas[i], outfiles[i]
    write2file(data, outfile)

time_end = time.time()
print "Time elapsed: ", time_end - time_start
Exemplo n.º 39
0
import os
import random
from flask import Flask, json, request
import requests
from monitor import monitor
app = Flask(__name__)
# Shared GPU-monitor instance used by the GroupMe callback below.
m = monitor()


def reply(message):
    """Post *message* to the GroupMe group via the bot-post endpoint.

    The bot identity is taken from the BOT_ID environment variable.
    """
    payload = {
        'bot_id': os.environ['BOT_ID'],
        'text': message,
    }
    requests.post('https://api.groupme.com/v3/bots/post', json=payload)


@app.route('/', methods=['POST'])
def groupme_callback():
    """Handle a GroupMe bot callback POST: reply with the GPU report.

    Responds only when the message comes from the configured group
    (GROUP_ID env var) and was not sent by a bot, to avoid reply loops.
    """
    json_body = request.get_json()
    if json_body['group_id'] == os.environ[
            'GROUP_ID'] and json_body['sender_type'] != 'bot':
        # some degree of verification that it is sent via a groupme callback
        # could also check for "User-Agent: GroupMeBotNotifier/1.0", but that's plenty spoofable
        # The incoming message text is intentionally ignored: the bot
        # always answers with the current GPU status.
        # (Fix: removed a dead `message = json_body['text']` assignment
        # that was immediately overwritten.)
        reply(m.printGpus())


@app.route('/hello', methods=['GET'])
def helloWorld():
    """Trivial health-check endpoint confirming the app is serving."""
    greeting = "hello world!"
    return greeting
Exemplo n.º 40
0
def main_i():
    """internal main routine: parse the command line and dispatch.

    We either query/manipulate configuration, format gsyncd URLs, or
    start the service as monitor, master, or slave.
    """
    # rconf holds "local" settings controlling this invocation only
    # (kept out of the parsed option values / config file).
    rconf = {'go_daemon': 'should'}

    # optparse callback: store the value as an absolute path ('-' kept).
    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)
    # optparse callback: store the value in rconf, not parser.values.
    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val
    # Make a callback storing a fixed value in rconf.
    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)
    # Make a callback storing a FreeObject describing a config operation.
    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option('--gluster-command',     metavar='CMD',   default='glusterfs')
    op.add_option('--gluster-log-file',    metavar='LOGF',  default=os.devnull, type=str, action='callback', callback=store_abs)
    op.add_option('--gluster-log-level',   metavar='LVL')
    op.add_option('-p', '--pid-file',      metavar='PIDF',  type=str, action='callback', callback=store_abs)
    op.add_option('-l', '--log-file',      metavar='LOGF',  type=str, action='callback', callback=store_abs)
    op.add_option('--state-file',          metavar='STATF', type=str, action='callback', callback=store_abs)
    op.add_option('-L', '--log-level',     metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',   default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id',           metavar='UUID')
    op.add_option('--session-owner',       metavar='ID')
    op.add_option('-s', '--ssh-command',   metavar='CMD',   default='ssh')
    op.add_option('--rsync-command',       metavar='CMD',   default='rsync')
    op.add_option('--rsync-extra',         metavar='ARGS',  default='-sS', help=SUPPRESS_HELP)
    op.add_option('--timeout',             metavar='SEC',   type=int, default=120)
    op.add_option('--sync-jobs',           metavar='N',     type=int, default=3)
    op.add_option('--turns',               metavar='N',     type=int, default=0, help=SUPPRESS_HELP)

    op.add_option('-c', '--config-file',   metavar='CONF',  type=str, action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback', callback=store_local_curry(True))
    op.add_option('--feedback-fd', dest='feedback_fd', type=int, help=SUPPRESS_HELP, action='callback', callback=store_local)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,      action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",    action='callback', callback=store_local_curry('dont'))
    # --debug: foreground operation with DEBUG logging to stdout.
    # NOTE(review): the trailing comma makes this statement a 1-tuple
    # expression — harmless but looks accidental; verify.
    op.add_option('--debug', dest="go_daemon",              action='callback', callback=lambda *a: (store_local_curry('dont')(*a),
                                                                                                    setattr(a[-1].values, 'log_file', '-'),
                                                                                                    setattr(a[-1].values, 'log_level', 'DEBUG'))),

    for a in ('check', 'get'):
        op.add_option('--config-' + a,      metavar='OPT',  type=str, dest='config', action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback', callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m,   metavar='OPT VAL', type=str, nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m,   metavar='OPT',  type=str, dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(not not m)

    op.add_option('--normalize-url',           dest='url_print', action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url',        dest='url_print', action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print', action='callback', callback=store_local_curry('canon_esc'))

    # Option names that are legal config-file tunables.
    tunables = [ norm(o.get_opt_string()[2:]) for o in op.option_list if o.callback in (store_abs, None) and o.get_opt_string() not in ('--version', '--help') ]

    # precedence for sources of values: 1) commandline, 2) cfg file, 3) defaults
    # -- for this to work out we need to tell apart defaults from explicitly set
    # options... so churn out the defaults here and call the parser with virgin
    # values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    confdata = rconf.get('config')
    if not (len(args) == 2 or \
            (len(args) == 1 and rconf.get('listen')) or \
            (len(args) <= 2 and confdata) or \
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    if getattr(confdata, 'rx', None):
        # peers are regexen, don't try to parse them
        canon_peers = args
        namedict = {}
    else:
        rscs = [resource.parse_url(u) for u in args]
        dc = rconf.get('url_print')
        if dc:
            # URL-formatting mode: print each resource in the requested
            # normalization and exit.
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True, 'escaped': True}}[dc]))
            return
        local = remote = None
        if rscs:
            local = rscs[0]
            if len(rscs) > 1:
                remote = rscs[1]
            if not local.can_connect_to(remote):
                raise GsyncdError("%s cannot work with %s" % (local.path, remote and remote.path))
        pa = ([], [], [])
        urlprms = ({}, {'canonical': True}, {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        peers, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring to / repreenting
        # peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = { local: ('local', 'master'), remote: ('remote', 'slave') }
        else:
            rmap = { local: ('local', 'slave') }
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                if x.scheme == 'gluster':
                    namedict[name + 'vol'] = x.volume
    if not 'config_file' in rconf:
        rconf['config_file'] = os.path.join(os.path.dirname(sys.argv[0]), "conf/gsyncd.conf")
    gcnf = GConffile(rconf['config_file'], canon_peers, defaults.__dict__, opts.__dict__, namedict)

    # Config query/manipulation mode: perform the requested op and return.
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        return

    # Merge values into the global config with the documented precedence:
    # defaults, then config file, then explicit command-line options.
    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    # Keep the monitor's feedback pipe from leaking into children.
    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    #normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    # Service mode: decide role (monitor/master/slave) and daemonization.
    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')

    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        # master with an SSH slave: daemonize only after the remote
        # connection succeeds, so connect errors stay visible.
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif remote:
        #master
        label = ''
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)

    if be_monitor:
        return monitor()

    # master/slave service loop
    logging.info("syncing: %s" % " -> ".join(peers))
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
Exemplo n.º 41
0
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    # Run-config accumulator: holds "how to run" values (as opposed to
    # tunables kept on the parser values / gconf). Mutated by the
    # store_local* option callbacks below.
    rconf = {"go_daemon": "should"}

    # Option callback: normalize path-valued options to absolute paths
    # ("-" is kept verbatim, conventionally meaning stdout/stderr).
    def store_abs(opt, optstr, val, parser):
        if val and val != "-":
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    # Option callback: route the value into rconf instead of parser.values.
    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val

    # Build a store_local callback with a fixed value (for flag-like options).
    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    # Build a store_local callback that wraps the value in a FreeObject
    # carrying the config operation name (check/get/set/del) plus fields
    # produced by dmake from the raw option value.
    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option("--gluster-command-dir", metavar="DIR", default="")
    op.add_option(
        "--gluster-log-file", metavar="LOGF", default=os.devnull, type=str, action="callback", callback=store_abs
    )
    op.add_option("--gluster-log-level", metavar="LVL")
    op.add_option("--gluster-params", metavar="PRMS", default="")
    op.add_option("--glusterd-uuid", metavar="UUID", type=str, default="", help=SUPPRESS_HELP)
    op.add_option("--gluster-cli-options", metavar="OPTS", default="--log-file=-")
    op.add_option("--mountbroker", metavar="LABEL")
    op.add_option("-p", "--pid-file", metavar="PIDF", type=str, action="callback", callback=store_abs)
    op.add_option("-l", "--log-file", metavar="LOGF", type=str, action="callback", callback=store_abs)
    op.add_option("--iprefix", metavar="LOGD", type=str, action="callback", callback=store_abs)
    op.add_option("--changelog-log-file", metavar="LOGF", type=str, action="callback", callback=store_abs)
    op.add_option("--log-file-mbr", metavar="LOGF", type=str, action="callback", callback=store_abs)
    op.add_option("--state-file", metavar="STATF", type=str, action="callback", callback=store_abs)
    op.add_option("--state-detail-file", metavar="STATF", type=str, action="callback", callback=store_abs)
    op.add_option("--georep-session-working-dir", metavar="STATF", type=str, action="callback", callback=store_abs)
    op.add_option("--ignore-deletes", default=False, action="store_true")
    op.add_option("--isolated-slave", default=False, action="store_true")
    op.add_option("--use-rsync-xattrs", default=False, action="store_true")
    op.add_option("--sync-xattrs", default=True, action="store_true")
    op.add_option("--sync-acls", default=True, action="store_true")
    op.add_option("--log-rsync-performance", default=False, action="store_true")
    op.add_option("--pause-on-start", default=False, action="store_true")
    op.add_option("-L", "--log-level", metavar="LVL")
    op.add_option("-r", "--remote-gsyncd", metavar="CMD", default=os.path.abspath(sys.argv[0]))
    op.add_option("--volume-id", metavar="UUID")
    op.add_option("--slave-id", metavar="ID")
    op.add_option("--session-owner", metavar="ID")
    op.add_option("--local-id", metavar="ID", help=SUPPRESS_HELP, default="")
    op.add_option("--local-path", metavar="PATH", help=SUPPRESS_HELP, default="")
    op.add_option("-s", "--ssh-command", metavar="CMD", default="ssh")
    op.add_option("--ssh-command-tar", metavar="CMD", default="ssh")
    op.add_option("--rsync-command", metavar="CMD", default="rsync")
    op.add_option("--rsync-options", metavar="OPTS", default="")
    op.add_option("--rsync-ssh-options", metavar="OPTS", default="--compress")
    op.add_option("--timeout", metavar="SEC", type=int, default=120)
    op.add_option("--connection-timeout", metavar="SEC", type=int, default=60, help=SUPPRESS_HELP)
    op.add_option("--sync-jobs", metavar="N", type=int, default=3)
    op.add_option("--replica-failover-interval", metavar="N", type=int, default=1)
    op.add_option("--changelog-archive-format", metavar="N", type=str, default="%Y%m")
    op.add_option("--use-meta-volume", default=False, action="store_true")
    op.add_option("--meta-volume-mnt", metavar="N", type=str, default="/var/run/gluster/shared_storage")
    op.add_option("--turns", metavar="N", type=int, default=0, help=SUPPRESS_HELP)
    op.add_option("--allow-network", metavar="IPS", default="")
    op.add_option("--socketdir", metavar="DIR")
    op.add_option("--state-socket-unencoded", metavar="SOCKF", type=str, action="callback", callback=store_abs)
    op.add_option("--checkpoint", metavar="LABEL", default="0")

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option("--special-sync-mode", type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option("--change-detector", metavar="MODE", type=str, default="xtime")
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option("--change-interval", metavar="SEC", type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option("--working-dir", metavar="DIR", type=str, action="callback", callback=store_abs)
    op.add_option("--use-tarssh", default=False, action="store_true")

    op.add_option("-c", "--config-file", metavar="CONF", type=str, action="callback", callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option("--monitor", dest="monitor", action="callback", callback=store_local_curry(True))
    op.add_option("--agent", dest="agent", action="callback", callback=store_local_curry(True))
    op.add_option("--resource-local", dest="resource_local", type=str, action="callback", callback=store_local)
    op.add_option("--resource-remote", dest="resource_remote", type=str, action="callback", callback=store_local)
    op.add_option(
        "--feedback-fd", dest="feedback_fd", type=int, help=SUPPRESS_HELP, action="callback", callback=store_local
    )
    op.add_option("--rpc-fd", dest="rpc_fd", type=str, help=SUPPRESS_HELP)
    op.add_option("--subvol-num", dest="subvol_num", type=int, help=SUPPRESS_HELP)
    op.add_option("--listen", dest="listen", help=SUPPRESS_HELP, action="callback", callback=store_local_curry(True))
    op.add_option("-N", "--no-daemon", dest="go_daemon", action="callback", callback=store_local_curry("dont"))
    op.add_option("--verify", type=str, dest="verify", action="callback", callback=store_local)
    op.add_option("--create", type=str, dest="create", action="callback", callback=store_local)
    op.add_option("--delete", dest="delete", action="callback", callback=store_local_curry(True))
    op.add_option("--status-get", dest="status_get", action="callback", callback=store_local_curry(True))
    # --debug implies: no daemon, log to stdout/stderr, DEBUG level.
    op.add_option(
        "--debug",
        dest="go_daemon",
        action="callback",
        callback=lambda *a: (
            store_local_curry("dont")(*a),
            setattr(a[-1].values, "log_file", "-"),
            setattr(a[-1].values, "log_level", "DEBUG"),
            setattr(a[-1].values, "changelog_log_file", "-"),
        ),
    )
    op.add_option("--path", type=str, action="append")

    # Config query options: --config-check / --config-get take a single
    # option name and record it as a FreeObject config request.
    for a in ("check", "get"):
        op.add_option(
            "--config-" + a,
            metavar="OPT",
            type=str,
            dest="config",
            action="callback",
            callback=store_local_obj(a, lambda vx: {"opt": vx}),
        )
    op.add_option(
        "--config-get-all", dest="config", action="callback", callback=store_local_obj("get", lambda vx: {"opt": None})
    )
    for m in ("", "-rx", "-glob"):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option(
                "--config-set" + m,
                metavar="OPT VAL",
                type=str,
                nargs=2,
                dest="config",
                action="callback",
                callback=store_local_obj("set", lambda vx: {"opt": vx[0], "val": vx[1], "rx": rx}),
            )
            op.add_option(
                "--config-del" + m,
                metavar="OPT",
                type=str,
                dest="config",
                action="callback",
                callback=store_local_obj("del", lambda vx: {"opt": vx, "rx": rx}),
            )

        # rx is False for the plain variant, otherwise "rx" / "glob".
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option("--normalize-url", dest="url_print", action="callback", callback=store_local_curry("normal"))
    op.add_option("--canonicalize-url", dest="url_print", action="callback", callback=store_local_curry("canon"))
    op.add_option(
        "--canonicalize-escape-url", dest="url_print", action="callback", callback=store_local_curry("canon_esc")
    )

    # Tunables are the options stored on the parser values (store_abs,
    # store_true or plain store) — i.e. everything that is NOT routed
    # into rconf — normalized to underscore form.
    tunables = [
        norm(o.get_opt_string()[2:])
        for o in op.option_list
        if (o.callback in (store_abs, "store_true", None) and o.get_opt_string() not in ("--version", "--help"))
    ]
    remote_tunables = ["listen", "go_daemon", "timeout", "session_owner", "config_file", "use_rsync_xattrs"]
    rq_remote_tunables = {"listen": True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    args_orig = args[:]
    # --resource-local / --resource-remote override the positional
    # master/slave resource arguments.
    r = rconf.get("resource_local")
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get("resource_remote")
    if r:
        if len(args) == 0:
            raise GsyncdError("local resource unspecfied")
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get("config")
    # Argument-count sanity: two peers normally; one is enough when
    # listening (slave side); config/url-print invocations are laxer.
    if not (
        len(args) == 2
        or (len(args) == 1 and rconf.get("listen"))
        or (len(args) <= 2 and confdata)
        or rconf.get("url_print")
    ):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    # --verify: just prove we can be spawned, then exit.
    verify = rconf.get("verify")
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    restricted = os.getenv("_GSYNCD_RESTRICTED_")

    # In restricted (mountbroker/SSH forced-command) mode, only a small
    # whitelist of tunables may be set, and some are mandatory.
    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError(
                "following tunables cannot be set with " "restricted SSH invocaton: " + ", ".join(bannedtuns)
            )
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError("tunable %s is not set to value %s required " "for restricted SSH invocaton" % (k, v))

    confrx = getattr(confdata, "rx", None)

    # Parse the peer url list into (all, local, remote) resources,
    # optionally verifying the local resource can talk to the remote one.
    def makersc(aa, check=True):
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" % (local.path, remote and remote.path))
        return (ra, local, remote)

    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == "glob":
            args = ["\A" + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get("url_print")
        rscs, local, remote = makersc(args_orig, not dc)
        # url-print modes: emit the requested representation and quit.
        if dc:
            for r in rscs:
                print(
                    r.get_url(
                        **{
                            "normal": {},
                            "canon": {"canonical": True},
                            "canon_esc": {"canonical": True, "escaped": True},
                        }[dc]
                    )
                )
            return
        # pa collects plain / canonical / canonical-escaped urls per peer.
        pa = ([], [], [])
        urlprms = ({}, {"canonical": True}, {"canonical": True, "escaped": True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring
        # to / repreenting peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[0].upper() + x[1:], lambda x: "e" + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ("local", "master"), remote: ("remote", "slave")}
        else:
            rmap = {local: ("local", "slave")}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + "vol"] = x.volume
                if name == "remote":
                    namedict["remotehost"] = x.remotehost
    if not "config_file" in rconf:
        rconf["config_file"] = os.path.join(os.path.dirname(sys.argv[0]), "conf/gsyncd_template.conf")

    upgrade_config_file(rconf["config_file"])
    gcnf = GConffile(rconf["config_file"], canon_peers, defaults.__dict__, opts.__dict__, namedict)

    # Config-only invocations: perform the requested op and return —
    # except checkpoint changes, which additionally get logged below.
    checkpoint_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == "check":
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == "get":
            gcnf.get(confdata.opt)
        elif confdata.op == "set":
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == "del":
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if its just config invocation
        if confdata.opt == "checkpoint" and confdata.op in ("set", "del") and not confdata.rx:
            checkpoint_change = True
        if not checkpoint_change:
            return

    # Assemble gconf with the documented precedence:
    # defaults, then config file, then explicit command-line options.
    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    # --delete: remove the session's runtime artifacts (pid/state/socket
    # files, changelog working dir) and return.
    delete = rconf.get("delete")
    if delete:
        logging.info("geo-replication delete")
        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, "pid_file", None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, "state_file", None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, "state_detail_file", None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, "state_socket_unencoded", None):
            cleanup_paths.append(gconf.state_socket_unencoded)

        # NOTE(review): slicing off the last 11 chars presumably strips a
        # known config-file suffix before globbing — confirm against the
        # config file naming scheme.
        cleanup_paths.append(rconf["config_file"][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, "working_dir", None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError):
                if sys.exc_info()[1].errno == ENOENT:
                    pass
                else:
                    raise GsyncdError("Error while removing working dir: %s" % gconf.working_dir)

        for path in cleanup_paths:
            # To delete temp files
            for f in glob.glob(path + "*"):
                _unlink(f)
        return

    # Restricted mode with allow-network: the SSH client's IP must fall
    # within one of the configured networks.
    if restricted and gconf.allow_network:
        ssh_conn = os.getenv("SSH_CONNECTION")
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv("SSH_CLIENT")
        if ssh_conn:
            allowed_networks = [IPNetwork(a) for a in gconf.allow_network.split(",")]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    # Keep the parent-feedback fd from leaking into exec'd children.
    ffd = rconf.get("feedback_fd")
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        # (logging.getLevelName returns "Level <name>" for unknown names)
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    # Unprivileged (mountbroker) processes use the dedicated mbr log file.
    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    # Deferred logging of a checkpoint config change (see above).
    if checkpoint_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label="conf")
            if confdata.op == "set":
                logging.info("checkpoint %s set" % confdata.val)
            elif confdata.op == "del":
                logging.info("checkpoint info was reset")
        except IOError:
            if sys.exc_info()[1].errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    # --create: record the monitor status and return.
    create = rconf.get("create")
    if create:
        if getattr(gconf, "state_file", None):
            set_monitor_status(gconf.state_file, create)
        return

    go_daemon = rconf["go_daemon"]
    be_monitor = rconf.get("monitor")
    be_agent = rconf.get("agent")

    rscs, local, remote = makersc(args)

    # --status-get: print per-brick status and return.
    status_get = rconf.get("status_get")
    if status_get:
        for brick in gconf.path:
            brick_status = GeorepStatus(gconf.state_file, brick)
            checkpoint_time = int(getattr(gconf, "checkpoint", "0"))
            brick_status.print_status(checkpoint_time=checkpoint_time)
        return

    # Master over SSH: defer daemonization until the remote connection
    # is established ('postconn'); logging is set up later in that case.
    if not be_monitor and isinstance(remote, resource.SSH) and go_daemon == "should":
        go_daemon = "postconn"
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = "monitor"
    elif be_agent:
        label = "agent"
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = "slave"
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    resource.Popen.init_errhandler()

    if be_agent:
        os.setsid()
        logging.debug("rpc_fd: %s" % repr(gconf.rpc_fd))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    logging.info("syncing: %s" % " -> ".join(r.url for r in rscs))
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon="done")
    local.connect()
    if ffd:
        os.close(ffd)
    # Serve: only pass the remote resource if one exists.
    local.service_loop(*[r for r in [remote] if r])
Exemplo n.º 42
0
def main():
    """Entry point: hand control straight to the monitor loop."""
    monitor()
Exemplo n.º 43
0
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    # Run-config accumulator for "how to run" values, populated by the
    # store_local* option callbacks (as opposed to tunables kept on the
    # parser values / gconf).
    rconf = {'go_daemon': 'should'}

    # Option callback: normalize path-valued options to absolute paths
    # ("-" is kept verbatim).
    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)
    # Option callback: route the value into rconf instead of parser.values.
    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val
    # Build a store_local callback with a fixed value (for flag options).
    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)
    # Build a store_local callback wrapping the value in a FreeObject that
    # records the config operation (check/get/set/del) plus dmake(vx) fields.
    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR',   default='')
    op.add_option('--gluster-log-file',    metavar='LOGF',  default=os.devnull, type=str, action='callback', callback=store_abs)
    op.add_option('--gluster-log-level',   metavar='LVL')
    op.add_option('--gluster-params',      metavar='PRMS',  default='')
    op.add_option('--gluster-cli-options', metavar='OPTS',  default='--log-file=-')
    op.add_option('--mountbroker',         metavar='LABEL')
    op.add_option('-p', '--pid-file',      metavar='PIDF',  type=str, action='callback', callback=store_abs)
    op.add_option('-l', '--log-file',      metavar='LOGF',  type=str, action='callback', callback=store_abs)
    op.add_option('--state-file',          metavar='STATF', type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes',      default=False, action='store_true')
    op.add_option('-L', '--log-level',     metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',   default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id',           metavar='UUID')
    op.add_option('--session-owner',       metavar='ID')
    op.add_option('-s', '--ssh-command',   metavar='CMD',   default='ssh')
    op.add_option('--rsync-command',       metavar='CMD',   default='rsync')
    op.add_option('--timeout',             metavar='SEC',   type=int, default=120)
    op.add_option('--connection-timeout',  metavar='SEC',   type=int, default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs',           metavar='N',     type=int, default=3)
    op.add_option('--turns',               metavar='N',     type=int, default=0, help=SUPPRESS_HELP)
    op.add_option('--allow-network',       metavar='IPS',   default='')

    op.add_option('-c', '--config-file',   metavar='CONF',  type=str, action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback', callback=store_local_curry(True))
    op.add_option('--feedback-fd', dest='feedback_fd', type=int, help=SUPPRESS_HELP, action='callback', callback=store_local)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,      action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",    action='callback', callback=store_local_curry('dont'))
    # --debug implies: no daemon, log to stdout, DEBUG level.
    # NOTE(review): the trailing comma after the closing parens turns this
    # statement into a 1-tuple — harmless at runtime but likely unintended.
    op.add_option('--debug', dest="go_daemon",              action='callback', callback=lambda *a: (store_local_curry('dont')(*a),
                                                                                                    setattr(a[-1].values, 'log_file', '-'),
                                                                                                    setattr(a[-1].values, 'log_level', 'DEBUG'))),

    # Config query options: --config-check / --config-get record a
    # FreeObject config request for the named option.
    for a in ('check', 'get'):
        op.add_option('--config-' + a,      metavar='OPT',  type=str, dest='config', action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback', callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m,   metavar='OPT VAL', type=str, nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m,   metavar='OPT',  type=str, dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {'opt': vx, 'rx': rx}))
        # rx is False for the plain variant, otherwise "rx" / "glob".
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url',           dest='url_print', action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url',        dest='url_print', action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print', action='callback', callback=store_local_curry('canon_esc'))

    # Tunables = options stored on the parser values (everything NOT
    # routed into rconf), normalized to underscore form.
    tunables = [ norm(o.get_opt_string()[2:]) for o in op.option_list if o.callback in (store_abs, 'store_true', None) and o.get_opt_string() not in ('--version', '--help') ]
    remote_tunables = [ 'listen', 'go_daemon', 'timeout', 'session_owner', 'config_file' ]
    rq_remote_tunables = { 'listen': True }

    # precedence for sources of values: 1) commandline, 2) cfg file, 3) defaults
    # -- for this to work out we need to tell apart defaults from explicitly set
    # options... so churn out the defaults here and call the parser with virgin
    # values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    confdata = rconf.get('config')
    # Argument-count sanity: two peers normally; one suffices when
    # listening (slave side); config/url-print invocations are laxer.
    if not (len(args) == 2 or \
            (len(args) == 1 and rconf.get('listen')) or \
            (len(args) <= 2 and confdata) or \
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    # In restricted (SSH forced-command) mode, only a small whitelist of
    # tunables may be set, and some are mandatory.
    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with restricted SSH invocaton: ' + \
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if not k in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required for restricted SSH invocaton' % \
                                  (k, v))

    confrx = getattr(confdata, 'rx', None)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = [ '\A' + fnmatch.translate(a) for a in args ]
        canon_peers = args
        namedict = {}
    else:
        rscs = [resource.parse_url(u) for u in args]
        dc = rconf.get('url_print')
        # url-print modes: emit the requested representation and quit.
        if dc:
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True, 'escaped': True}}[dc]))
            return
        local = remote = None
        if rscs:
            local = rscs[0]
            if len(rscs) > 1:
                remote = rscs[1]
            if not local.can_connect_to(remote):
                raise GsyncdError("%s cannot work with %s" % (local.path, remote and remote.path))
        # pa collects plain / canonical / canonical-escaped urls per peer.
        pa = ([], [], [])
        urlprms = ({}, {'canonical': True}, {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        peers, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring to / repreenting
        # peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = { local: ('local', 'master'), remote: ('remote', 'slave') }
        else:
            rmap = { local: ('local', 'slave') }
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                if x.scheme == 'gluster':
                    namedict[name + 'vol'] = x.volume
    if not 'config_file' in rconf:
        rconf['config_file'] = os.path.join(os.path.dirname(sys.argv[0]), "conf/gsyncd.conf")
    gcnf = GConffile(rconf['config_file'], canon_peers, defaults.__dict__, opts.__dict__, namedict)

    # Config-only invocations: perform the requested op and return.
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        return

    # Assemble gconf with the documented precedence:
    # defaults, then config file, then explicit command-line options.
    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    # Restricted mode with allow-network: the SSH client's IP must fall
    # within one of the configured networks.
    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            #legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [ IPNetwork(a) for a in gconf.allow_network.split(',') ]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    # Keep the parent-feedback fd from leaking into exec'd children.
    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    #normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        # (logging.getLevelName returns "Level <name>" for unknown names)
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')

    # Master over SSH: defer daemonization until the remote connection
    # is established ('postconn'); logging is set up later in that case.
    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif remote:
        #master
        label = ''
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)

    if be_monitor:
        return monitor()

    # NOTE(review): 'peers' is only bound in the non-confrx branch above;
    # the confrx branch always returns earlier (via confdata), so this is
    # reachable only when peers is defined — verify if the flow changes.
    logging.info("syncing: %s" % " -> ".join(peers))
    resource.Popen.init_errhandler()
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    # Serve: only pass the remote resource if one exists.
    local.service_loop(*[r for r in [remote] if r])
Exemplo n.º 44
0
def handle_incident():
    """Fetch the root page of the local service and return its body text.

    The request (and the ``.text`` decode) run inside the ``monitor``
    context manager, which presumably times/records the 'GET /' operation.
    """
    with monitor('GET /'):
        # Keep the whole request + decode inside the monitored span.
        return requests.get('http://localhost:8000').text
Exemplo n.º 45
0
 def __init__(self, *args, **kwds):
     """Build the main application frame: menu bar with scheduling-policy
     radio menus, list views for processes/memory/concurrency/IO, and the
     monitor/printer simulation objects.

     NOTE(review): comments translated from Spanish; code unchanged.
     """
     # begin wxGlade: MyFrame.__init__
     kwds["style"] = wx.DEFAULT_FRAME_STYLE
     wx.Frame.__init__(self, *args, **kwds)
     self.panels = wx.Notebook(self, -1, style=0)
     # Scheduling-policy selectors; 0 matches the first radio item (FIFO)
     # in the menus built below — TODO confirm against the handlers.
     self.politica_llegada = 0
     self.politica_ejecucion = 0
     self.politica_impresion = 0
     self.politica_ejecution_alterada = False
     
     # Global simulation state
     self.RAM = 512
     self.numero_procesos = 5
     self.tareas = []
     self.memoria = []
     self.historial = [] # actually two-dimensional (list of lists)
     self.historial_impresora = [] # actually two-dimensional (list of lists)
     self.temp_tareas = [] 
     
     # Menu Bar
     self.menu_principal = wx.MenuBar()
     self.archivo = wx.Menu()
     self.salir = wx.MenuItem(self.archivo, wx.NewId(), "&Salir de la Aplicacion", "", wx.ITEM_NORMAL)
     self.archivo.AppendItem(self.salir)
     self.menu_principal.Append(self.archivo, "&Archivo")
     
     self.menu_procesos = wx.Menu()
     self.correr = wx.MenuItem(self.menu_procesos, wx.NewId(), "&Correr procesos actuales", "", wx.ITEM_NORMAL)
     self.detener = wx.MenuItem(self.menu_procesos, wx.NewId(), "&Detener procesos actuales", "", wx.ITEM_NORMAL)
     self.menu_procesos.AppendItem(self.correr)
     self.menu_procesos.AppendItem(self.detener)
     self.menu_principal.Append(self.menu_procesos, "&Procesos")
             
     # Three radio-item sub-menus, one per scheduling policy
     # (arrival / execution / printing).
     # NOTE(review): AppendItem/AppendMenu are classic wxPython (2.x) APIs,
     # removed in Phoenix — confirm the target wx version.
     self.opciones = wx.Menu()
     self.prioridad_ini = wx.Menu()
     self.prioridad_ini_fifo = wx.MenuItem(self.prioridad_ini, wx.NewId(), "FIFO", "", wx.ITEM_RADIO)
     self.prioridad_ini_RAM = wx.MenuItem(self.prioridad_ini, wx.NewId(), "RAM", "", wx.ITEM_RADIO)
     self.prioridad_ini_tiempo = wx.MenuItem(self.prioridad_ini, wx.NewId(), "Tiempo", "", wx.ITEM_RADIO)
     self.prioridad_ini.AppendItem(self.prioridad_ini_fifo)
     self.prioridad_ini.AppendItem(self.prioridad_ini_RAM)
     self.prioridad_ini.AppendItem(self.prioridad_ini_tiempo)
     self.prioridad_eje = wx.Menu()
     self.prioridad_eje_RAM = wx.MenuItem(self.prioridad_eje, wx.NewId(), "RAM", "", wx.ITEM_RADIO)
     self.prioridad_eje_fifo = wx.MenuItem(self.prioridad_eje, wx.NewId(), "FIFO", "", wx.ITEM_RADIO)
     self.prioridad_eje_tiempo = wx.MenuItem(self.prioridad_eje, wx.NewId(), "Tiempo", "", wx.ITEM_RADIO)
     self.prioridad_eje.AppendItem(self.prioridad_eje_RAM)
     self.prioridad_eje.AppendItem(self.prioridad_eje_fifo)
     self.prioridad_eje.AppendItem(self.prioridad_eje_tiempo)
     self.prioridad_imp = wx.Menu()
     self.prioridad_imp_fifo = wx.MenuItem(self.prioridad_imp, wx.NewId(), "FIFO", "", wx.ITEM_RADIO)
     self.prioridad_imp_tiempo = wx.MenuItem(self.prioridad_imp, wx.NewId(), "Tiempo", "", wx.ITEM_RADIO)
     self.prioridad_imp.AppendItem(self.prioridad_imp_fifo)
     self.prioridad_imp.AppendItem(self.prioridad_imp_tiempo)
     self.opciones.AppendMenu(wx.NewId(),"Prioridad de e&ntrada",self.prioridad_ini,"")
     self.opciones.AppendMenu(wx.NewId(),"Prioridad por e&jecución",self.prioridad_eje,"")
     self.opciones.AppendMenu(wx.NewId(),"Prioridad de &impresion",self.prioridad_imp,"")
     self.menu_principal.Append(self.opciones, "&Opciones")
     
     self.ayuda = wx.Menu()
     self.acercade = wx.MenuItem(self.ayuda, wx.NewId(), "Acerca de", "", wx.ITEM_NORMAL)
     self.ayuda.AppendItem(self.acercade)
     self.menu_principal.Append(self.ayuda, "A&yuda")
     self.SetMenuBar(self.menu_principal)
     
     # Menu Bar end
     # NOTE(review): this rebinds self.memoria — the empty list assigned
     # above is discarded and replaced by a ListCtrl widget; verify nothing
     # relied on the list value.
     self.procesos = wx.ListCtrl(self.panels, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
     self.memoria = wx.ListCtrl(self.panels, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
     #self.grafica = wx.ScrolledWindow(self.panels, -1)
     self.concurrencia = wx.ListCtrl(self.panels, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
     self.ent_sal = wx.ListCtrl(self.panels, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
     self.status_bar = wx.StatusBar(self, -1)
     
     # Chart data (one series per process / per printer job)
     self.graficas = []
     self.graficas_impresora = []
     
     # Create the monitor
     self.monitor = monitor.monitor(self)
     # ...and the printer
     self.impresora = impresora.impresora(self)
     
     self.__set_properties()
     self.__do_layout()
     
     # Bind menu events to their handlers.
     self.Bind(wx.EVT_MENU, self.handler_menu_salir, self.salir)
     self.Bind(wx.EVT_MENU, self.handler_menu_acercade, self.acercade)
     #self.Bind(wx.EVT_MENU, self.handler_iniciar, self.iniciar)
     self.Bind(wx.EVT_MENU, self.handler_correr, self.correr)
     self.Bind(wx.EVT_MENU, self.handler_detener, self.detener)
     self.Bind(wx.EVT_PAINT, self.OnPaint_Graphic)
     
     
     # Bind the scheduling-policy radio items to their handlers.
     self.Bind(wx.EVT_MENU, self.handler_prioridad_ini_fifo, self.prioridad_ini_fifo)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_ini_tiempo, self.prioridad_ini_tiempo)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_ini_recursos, self.prioridad_ini_RAM)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_eje_fifo, self.prioridad_eje_fifo)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_eje_tiempo, self.prioridad_eje_tiempo)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_eje_recursos, self.prioridad_eje_RAM)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_imp_fifo, self.prioridad_imp_fifo)
     self.Bind(wx.EVT_MENU, self.handler_prioridad_imp_tiempo, self.prioridad_imp_tiempo)
Exemplo n.º 46
0
    # NOTE(review): orphaned function body — the enclosing `def` line is not
    # visible in this chunk; `mon_mod`, `drive` and `plot` are defined
    # elsewhere, and `track` must be bound before this point (see below).
    print("Reading config...", end=" ")
    with open("config.json",
              "r") as file:  # Read the settings from config.json
        jsonImported = json.loads(file.read())
        appsToMonitor = jsonImported["monitorApps"]
        appNames = jsonImported["appNames"]
        appColors = jsonImported["appColors"]
        appIcons = jsonImported["appIcons"]
        timeout = jsonImported["refreshTime"]
        uploadPerEpoch = jsonImported["uploadPerEpoch"]
        print("Done.")
        print(f"Refresh rate: {timeout}, Upload interval: {uploadPerEpoch}")

    print("Populating data (if exist)", end=" ")
    with open("tracking.json", "r") as file:  # Populate existing data if it exists
        try:
            track = json.load(file)
            print("Done.")
        except:
            # NOTE(review): bare except silently ignores a corrupt/empty
            # tracking.json; `track` then keeps whatever value it had before
            # this fragment (NameError later if it had none) — confirm the
            # missing enclosing code initializes it.
            pass

    print("Monitoring: ")
    # Bundle everything the monitor loop needs into one positional list.
    data = [
        track, appsToMonitor, appNames, appIcons, appColors, timeout,
        uploadPerEpoch, drive
    ]
    mon_mod.monitor(data)  # Start monitoring
    plot()  # Plot it

# print(track)
Exemplo n.º 47
0
		else:
			stats.append(None)

	return stats

def hist(log):
	"""Show a 20-bin histogram of the samples in *log*."""
	bin_count = 20
	plt.hist(log, bin_count)
	plt.show()

if __name__ == "__main__":
	from monitor import monitor
	from common import dst

	def update_stats(log):
		"""Trace to dst, fold each hop into log, and return (rows, log).

		Each row is the per-hop statistics projected onto `header`
		columns, or ['*'] when no statistics exist for that hop.
		"""
		for idx, hop in enumerate(trace(dst)):
			# Grow the log so this hop index has its own sample list.
			while idx >= len(log):
				log.append([])
			if hop:
				log[idx].append(hop)

		rows = []
		for entry in statistics(log):
			rows.append([entry[key] for key in header] if entry else ['*'])
		return rows, log

	log = monitor(header, update_stats, [])
	dump(log)

	# hist(samples)