def main(args):
    """Train a DarkNet19-based YOLOv2 model on the COCO train split.

    Args:
        args: namespace providing at least `lr` and `epochs`, plus whatever
              CocoDataLoader / Loss read from it (batch size, anchors, ...).
    """
    coco_dataloader = CocoDataLoader(args)
    trainloader = coco_dataloader.get_trainloader(args)

    yolov2 = DarkNet19()
    optimizer = Optimizer(yolov2.parameters(), args.lr)
    criterion = Loss()

    logger = Logger()
    logger.info('----- Starting training -----')

    for epoch in range(args.epochs):

        for i, data in enumerate(trainloader):
            images, targets = data

            optimizer.zero_grad()

            outputs = yolov2(images)
            total_loss = criterion.get_total_loss(outputs, targets, args)

            total_loss.backward()
            optimizer.step()

            # BUG FIX: the original referenced `loss.data`, but no name `loss`
            # exists in this scope -- the loss tensor is `total_loss`.
            # `.item()` extracts the Python scalar for clean log formatting.
            logger.info(f'Epoch: {epoch+1}/{args.epochs}, Step: {i+1}, Loss: {total_loss.item()}')

    logger.info('----- Training done! -----')
Ejemplo n.º 2
0
    def __init__(self, name, filename):
        """Initialise per-program state: logger, memory tables and counters."""
        self.logger = Logger(filename)
        self.name = name

        # Runtime memory and function bookkeeping.
        self.memory, self.localmemory, self.functionargs = {}, {}, {}
        self.func = None
        self.r_value = None

        # Intermediate-generation ("ig") mirrors of the tables above.
        self.igmemory, self.local, self.igfunctions = {}, {}, {}
        self.igfunc = None
        self.igfuncinfo = None

        self.igloops = {}

        # Variable tracking plus loop/tag counters.
        self.usedvars, self.tempvars = set(), set()
        self.prefixes, self.loop, self.break_var = [], [], []
        self.loops = 0
        self.tags = 0
Ejemplo n.º 3
0
    def run(self):
        """Execute the MySQL setup steps in order.

        Returns:
            True on full success, otherwise an (error_code, message) tuple
            naming the first step that failed.
        """
        self.env_check()
        # Each entry: (step callable, error code, shared log/return message).
        steps = (
            (self.dir_make, 20002, 'mysql dir make failed'),
            (self.cnf_make, 20001, 'my.cnf update failed'),
            (self.mysql_install, 20003, 'mysql install failed'),
        )
        for step, code, message in steps:
            if not step():
                Logger(self.file_name).get_logger().info(message)
                return code, message
        # Give mysqld time to come up before creating grants.
        time.sleep(15)
        if not self.mysql_user_grant():
            Logger(self.file_name).get_logger().info("grants failed")
            return 20010, 'grants failed'
        return True
Ejemplo n.º 4
0
 def group_check(self):
     """Return True if a 'mysql' group exists in /etc/group; log and return False otherwise."""
     # Ask the system for a group entry literally named "mysql".
     cmd = "cat /etc/group |grep -w mysql|awk -F':' '{print $1}'"
     mgroup = os.popen(cmd).read().strip('\n')
     if mgroup != '':
         return True
     Logger(self.file_name).get_logger().error("group check fail2")
     return False
Ejemplo n.º 5
0
 def user_check(self):
     """Return True if a 'mysql' user exists in /etc/passwd; log and return False otherwise."""
     muser = os.popen(
         "cat /etc/passwd |grep -w mysql|awk -F':' '{print $1}'").read(
         ).strip('\n')
     if not muser == '':
         return True
     # FIX: corrected typo in the log message ("fial2" -> "fail2") and log
     # at .error level to match group_check's failure logging.
     Logger(self.file_name).get_logger().error("user check fail2")
     return False
Ejemplo n.º 6
0
 def __init__(self):
     """Prepare the blocker: hit threshold, logger, nginx log reader and redis handle."""
     # Maximum allowed hits inside one interval window.
     self.limitation = int(FREQUENCY) * int(INTERVAL)
     self.lg = Logger("/var/log/nginx/block_log.log", level="info")
     self.nl = NginxLog()
     # All redis operations share one connection pool.
     self.con = redis.Redis(
         connection_pool=redis.ConnectionPool(host=REDIS['host'],
                                              port=REDIS['port'],
                                              password=REDIS['password'],
                                              db=REDIS['db']))
Ejemplo n.º 7
0
 def env_check(self):
     """Verify the environment before installing MySQL.

     Ensures the 'mysql' group/user exist (creating them if missing), then
     requires that neither the base dir nor the data dir already exists and
     that the install package is present.

     Returns:
         True when the environment is clean for a fresh install,
         False (after logging the reason) otherwise.
     """
     mbase = os.path.isdir(self.mysql_base)
     mdatap = os.path.isdir(self.mysql_data_path)
     mpak = os.path.exists(self.mysql_package)
     if not self.group_check():
         os.system('groupadd mysql')
     if not self.user_check():
         os.system('useradd -r -g mysql -s /sbin/nologin mysql')
     # Guard clauses replace the original nested `== False` pyramid;
     # every accept/reject outcome is unchanged.
     if mbase:
         Logger(self.file_name).get_logger().info("mysql base path exist")
         return False
     if mdatap:
         Logger(self.file_name).get_logger().info("mysql data path exist")
         return False
     if not mpak:
         Logger(self.file_name).get_logger().info("mysql package not exist")
         return False
     return True
Ejemplo n.º 8
0
    def mysql_install(self):
        """Unpack the MySQL tarball, initialise the datadir and register the service.

        Steps: extract package -> move into mysql_base -> fix ownership and
        permissions -> run `mysqld --initialize-insecure` -> install the init
        script and restart the service.

        Returns:
            True on success, False (after logging) at the first failed step.
        """
        Logger(
            self.file_name).get_logger().info("start unzip mysql package...")
        # Extract the gzipped tarball into the data path.
        t = tarfile.open(self.mysql_package, "r:gz")
        t.extractall(path=self.mysql_data_path)
        t.close()
        # path
        # Move the extracted tree to its final base location.
        os.renames(self.mysql_document, self.mysql_base)
        cmd_own_confirm = "chown -R mysql.mysql {}".format(self.mysql_base)
        if not subprocess.call(cmd_own_confirm, shell=True) == 0:
            Logger(self.file_name).get_logger().info("chown mysql base fail")
            return False
        cmd_mod_green = "chmod -R g+rw {}".format(self.mysql_data_path)
        if not subprocess.call(cmd_mod_green, shell=True) == 0:
            Logger(self.file_name).get_logger().info("chow mysql data fail")
            return False
        cmd_mod = [
            'chmod -R 755 %s' % self.mysql_base,
            'chmod -R 750 %s/bin' % self.mysql_base
        ]
        for cmd in cmd_mod:
            subprocess.call(cmd, shell=True)
        Logger(self.file_name).get_logger().info("start init mysql...")
        # Initialise the datadir with no root password (--initialize-insecure),
        # using the per-port my.cnf.<port> generated earlier.
        result = self.mysql_base + '/bin/mysqld ' + ' --defaults-file=' + self.mysql_data_path + '/my.cnf.' \
                     + self.port + ' --initialize-insecure --user=mysql >>/dev/null 2>&1'
        if not subprocess.call(result, shell=True) == 0:
            Logger(self.file_name).get_logger().info("mysql init fail")
            return False
        # Install the service script, patch its PORT/PATH_ROOT placeholders,
        # and remove stray my.cnf copies that would shadow the real config.
        shutil.copy(self.mysqlserver, self.service)
        cmds = [
            "sed -i 's/PORT/%s/g' %s" % (self.port, self.service),
            "sed -i 's/PATH_ROOT/\%s/g' %s" % ('/data', self.service),
            "chmod 755 %s" % self.service, "rm -rf /etc/my.cnf",
            "rm -rf %s/my.cnf" % self.mysql_base
        ]
        for cmd in cmds:
            subprocess.call(cmd, shell=True)
        Logger(self.file_name).get_logger().info("mysql starting......")
        result3 = '{0} restart'.format(self.service)
        if not subprocess.call(result3, shell=True) == 0:
            Logger(self.file_name).get_logger().info("mysql start fail")
            return False

        return True
  .. code-block:: python
    :linenos:
    
    from demo_utils.shell import Shell
    
    sh = Shell()
    
    sh.run('ls')
    sh.set_cwd('~')
    sh.set_cwd('/')
    sh.run('ls')
'''
import os, subprocess
from logs import Logger

logger = Logger('Shell').getLogger()

# The shell class defines just a few functions which can make executing commands easier
# run
#   This command has two possible ways to be called
#   (command) which is just a single line with all arguments
#   (command, args) which simply just joins a string with the command and arguments
#
# It is also possible to change the current working directory (cwd)
# for commands which are sensitive to file locations
# (Unfortunately 'cd' doesn't work)

# If no arguments are passed for args, then it is assumed to be a string of length 0
# If no arguments are passed to the constructor we assume default cwd

class Shell:
Ejemplo n.º 10
0
# Script which installs Zeppelin as an Ambari Service
import config, sys, platform, json, time, os
from shell import Shell
from curl_client import CurlClient
from logs import Logger

logger = Logger('service_installer').getLogger()


def install_hdp_select():
    '''Installs the hdp-select package.
  
  This is used to help install the Ambari Service for NiFi/Zeppelin. Retrieves the currently installed hadoop version.
  
  Typically the Sandbox has hdp-select already, but clusters won't always have it, which is why we provide this method.
  
  
  '''
    logger.info('Determining whether system is compatible with hdp-select')
    dist_info = platform.linux_distribution()
    if (len(dist_info[0]) == 0):
        logger.critical('Non Linux System. Could not install hdp-select')
        raise EnvironmentError(
            'You must be running a linux distribution to install hdp-select')


#  Only want to get distro name
#  Determine first distro name
#    Then determine the version (first char char for centos )
    distro = dist_info[0].lower()
    fullname = distro
    "ServiceInfo" : {
      "cluster_name" : "Sandbox",
      "maintenance_state" : "OFF",
      "service_name" : "YARN",
      "state" : "INSTALLED"
    }
  }



'''
import json, time
from curl_client import CurlClient
from logs import Logger

logger = Logger('Ambari').getLogger()


class Ambari:
    '''Initalize the Ambari client
    
    Args:
      username (str, optional): username to use for authentication (should have admin access)
      password (str, optional): password to use for authentication (should have admin access)
      proto (str, optional): Must be one of 'http' or 'https'. Defines which protocol to use. Defaults to 'http'
      server (str, optional): The hostname (or IP)  of the Ambari server. Defaults to 127.0.0.1.
      port (int, optional): The port that ambari server is running on. Defaults to 8080
      service_wait_time (int, optional): The time (in seconds) we should before we decide a service has failed changing states.
      config (dict, optional): This is a dictionary object which should contain the any of the keys 'username', 'password', 'proto', 'server', 'port', or 'service_wait_time'. Given the config here you can set any of the client's parameters through this object. However, when using this, the config object will override any of the specific arguments passed.
      
    Returns:
Ejemplo n.º 12
0
import json

try:
    from datetime import datetime

    from logs import Logger, LoggerMode
except BaseException as e:
    print("Import Error: ", e)

# Global Var
output_file = "logs_{}.log".format(datetime.now().strftime("%d%m%Y_%H%M%S"))
logs = Logger(output_file=output_file, log_level=LoggerMode.DEPLOY)
config_data = {}


def import_config(file_path):
    """Load the JSON file at *file_path* into the module-level config_data cache."""
    global config_data
    with open(file_path, "r") as handle:
        config_data = json.loads(handle.read())


def get_config_for(module):
    """Return the config section for *module*, lazily loading the default config file."""
    # Empty dict means the config has not been imported yet.
    if not config_data:
        import_config("config/conf.json")
    return config_data[module]
Ejemplo n.º 13
0
import os.path
import struct
from logs import Logger
import importlib.util
from utils import AsyncResponse
import asyncio
import fcgiVariables

logger = Logger('./')

class Handler:
  """Base request handler; concrete handlers override handleRequest."""

  def handleRequest(self, req, res):
    """Default no-op handler; subclasses are expected to respond via *res*."""

class StaticHandler(Handler):
  """Serves files from the current directory, mapping req.url -> './<url>'."""

  def handleRequest(self, req, res):
    """Send the static file named by req.url, or a 404 response if absent."""
    path = f'.{req.url}'
    if os.path.isfile(path):
      # FIX: `with` guarantees the handle is closed even if set_body raises
      # (the original leaked the file object on any exception before close()).
      with open(path, 'r') as f:
        res.set_body(f.read())

      # FIX: guard against extensionless URLs -- the original split('.')[1]
      # raised IndexError when the url contained no dot.
      parts = req.url.split('.')
      if len(parts) > 1 and parts[1] == 'html':
        res.set_headers({ 'content-type': 'text/html' })

      res.send()
    else:
      logger.logError(404)
      res.status_code = 404
      res.send()
Ejemplo n.º 14
0
'''A client for executing HTTP requests via cURL. It does not use any external libraries. The only requirement is that you have the curl executable in your PATH or bin.

Used to make arbitrary http(s) requests.
'''
import json, logging
from shell import Shell
from logs import Logger

logger = Logger('CurlClient').getLogger()


class CurlClient:
  '''The CurlClient object. Houses the parameters used when making requests

  Args:
    username (str, optional): Username for basic authentication. Default empty string
    password (str, optional): Password for basic authentication. Default empty string
    proto (str, optional): protocol used in requests. Must be one of http or https
    server (str, optional): The server or hostname to which we will make the request. Defaults 127.0.0.1
    port (int, optional): The integer number of the port that the server runs on. Defaults 8080
  '''
    
  username = ''
  '''Username for basic authentication'''
  
  password = ''
  '''Password for basic authentication'''
  
  server = ''
  '''Server or hostname to connect to'''
  
Ejemplo n.º 15
0
 def __init__(self, name, filename):
     """Initialise logging plus empty intermediate-generation ("ig") tables.

     NOTE(review): `name` is accepted but never stored -- confirm whether
     that is intentional.
     """
     self.logger = Logger(filename)
     # ig bookkeeping starts out empty.
     self.igfunc = None
     self.igfunctions, self.igmemory = {}, {}
Ejemplo n.º 16
0
        run_dir = os.path.join('..', 'runs', now)
        os.makedirs(os.path.join(run_dir, 'checkpoints'), exist_ok=True)
        os.makedirs(os.path.join(run_dir, 'logs'), exist_ok=True)
        early_stopping_score = None
        early_stopping_counter = 0
        step = 0

    # Initialize EarlyStopping
    early_stopping = EarlyStopping(step=step,
                                   run_dir=run_dir,
                                   best_score=early_stopping_score,
                                   counter=early_stopping_counter,
                                   verbose=False)

    # Initializing logger
    logger = Logger(os.path.join(run_dir, 'logs'))

    # Initialize data loaders
    dataset = ClusterDataset(files=parse_data_structure(hp.files),
                             feature_mean=np.load('mu.npy'),
                             feature_std=np.load('std.npy'))
    loader = DataLoader(dataset=dataset,
                        batch_size=hp.training.batch_size,
                        num_workers=hp.training.num_workers,
                        pin_memory=True)

    # Auto select best algorithm to maximize GPU utilization
    cudnn.benchmark = True

    # Execute train loop
    # try:
Ejemplo n.º 17
0
                self.con.set(route_ip, latest_frequency)
                expire_time = int(block_time) - block_delta
                self.con.expire(route_ip, expire_time)
            # 检查防火墙禁用该ip策略是否还在, 不在则加上
            if self.check_firewall(ip):
                lg.logger.info('IP仍在禁用中且已存在防火墙策略,无需重复添加')
            else:
                self.add_firewall(ip)
                lg.logger.info('未过期但无防火墙策略,已重新添加')

    def block_ip(self):
        """Walk the nginx IP-frequency table and firewall every offending IP."""
        for key, freq in self.nl.get_ip_frequency().items():
            ip = key.split(':')[1]
            # Route to the history path when this IP was blocked before.
            handler = (self.history_ip_add_firewall
                       if self.get_block_ip_history(key)
                       else self.new_ip_add_firewall)
            handler(key, freq, ip)


if __name__ == '__main__':
    # `lg` and `nl` stay module-global: the class methods above read them.
    lg = Logger('/var/log/nginx/blockip.log', level='info')
    nl = NginxLog()
    # Clear the temporarily generated log file first.
    nl.clear_tmp_log()
    blocker = BlockIp()
    # Drop firewall rules that have already expired...
    blocker.clear_expire_firewall()
    # ...then firewall every IP that exceeded the access limit.
    blocker.block_ip()
Ejemplo n.º 18
0
            checkpoint['optimizer_generator_state_dict'])
        optimizer['discriminator'].load_state_dict(
            checkpoint['optimizer_discriminator_state_dict'])
        scaler['generator'].load_state_dict(
            checkpoint['scaler_generator_state_dict'])
        scaler['discriminator'].load_state_dict(
            checkpoint['scaler_discriminator_state_dict'])
        phase = checkpoint['phase']
        step = checkpoint['step']
    else:
        phase = 0
        step = 0
        run_dir = utils.core.get_run_dir(process_group, args.local_rank)

    # Initializing logger
    logger = Logger(os.path.join(run_dir,
                                 'logs')) if args.local_rank == 0 else None

    # Auto select best algorithm to maximize GPU utilization
    cudnn.benchmark = True

    # Start main loop
    try:
        training(model, optimizer, criterion, scaler, logger, process_group,
                 run_dir)
    except KeyboardInterrupt:
        pass
    finally:
        if args.local_rank == 0:
            utils.core.save_checkpoint(run_dir, utils.core.ddp(model),
                                       optimizer, scaler, phase, step)
            print('Saved checkpoint.')
    gen = DataGenerator(rand_schema)
    # Generate a single 'row' of data
    print(gen.generate())


'''
#!/usr/bin/python
# -*- coding: utf-8 -*-

import json
import config
import random
from abc import abstractmethod
from logs import Logger

logger = Logger('Generator').getLogger()


class DataGenerator:
  '''The generator object. Pass the configuration here. use the generate() method to get a random object
  
  Args:
    schema (str): The schema file or JSON string which defines the data to be generated.
    seed (str, optional): The seed value for the generator
  
  '''

  def __init__(self, schema, seed=''):
    self.rand = random.Random()
    self.data_fields = []
    self.field_names = []