Example #1
# coding: utf-8

import mat4py as mp
import evaluate as ev
import numpy as np
import readfile as rf
import log
import appendix as ap

datasets = ['yeast',  'scene', 'CAL500', 'emotions', 'enron', 'genbase', 'medical',
            'corel5k', 'Corel16k001', 'Corel16k002', 'Corel16k003',
            'rcv1subset1', 'rcv1subset5']

algorithms = ['LPLC']

l = log.Log('LPLC')

for dataset in datasets:
    for algorithm in algorithms:
        file = mp.loadmat('../LPLC_result/%s.mat' % dataset)
        for k in range(10):

            path_label = '../datasets/%s/%s.xml' % (dataset, dataset)
            param = {
                'fold': k,
                'algorithm': algorithm,
                'dataset': dataset,
                'labels': rf.read_labels_from_xml(path_label)
                }

            y_tr = file['save_fold_%d_y_tr' % (k + 1)]
Example #2
    def getCellDataLua(self,r,c):
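        # Convert the cell at (r, c) into a Lua literal string according to the column's declared type.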
        val = self.table.cell(r,c).value
        if self.colType[c] == 'LS':
            val = str(val)
            if isinstance(val, unicode):
                val = val.encode('utf-8')
            val = val.replace('\r','\\r').replace('\n','\\n')

            valArray = val.split('|')
            valArraySize = len(valArray)
            strArray = "{"
            for i in range(0,valArraySize):
                if valArray[i] == '':
                    continue
                strArray += "\"" + valArray[i] + "\""
                if i < valArraySize - 1:
                    strArray += ","
            strArray += "}"
            #print strArray
            val = '%s'%strArray

        elif self.colType[c] == 'LI':
            val = str(val)
            if isinstance(val, unicode):
                val = val.encode('utf-8')
            valArray = val.split('|')
            valArraySize = len(valArray)
            strArray = "{"
            for i in range(0,valArraySize):
                if valArray[i] == '':
                    continue
                valArray[i] = "%d" % int(round(float(valArray[i])))
                if valArray[i].find('.') != -1:
                     log.Log("%s 文件第%d行 %d列使用 数据类型错误" % (self.fname1, r+1,c+1))
                strArray += valArray[i]
                if i < valArraySize - 1:
                    strArray += ","
                    #self.checkChkItem(valArray[i],r,c)
            strArray += "}"
            #print strArray
            val = '%s'%strArray
        elif self.colType[c] == 'LF':
            val = str(val)
            valArray = val.split('|')
            valArraySize = len(valArray)
            strArray = "{"
            for i in range(0,valArraySize):
                #print valArray[i]
                strArray += valArray[i]
                if i < valArraySize - 1:
                    strArray += ","
            strArray += "}"
            #print strArray
            val = '%s'%strArray
            pass

        elif self.colType[c] == 'STRING':
            if isinstance(val, (str, unicode)):
                if isinstance(val, unicode):
                    val = val.encode('utf-8')
            val = str(val)
            val = val.replace('\r','\\r').replace('\n','\\n')
            val = val.replace('"','\\"');
            #val = '"%s"'%val
            if val.endswith(".0"):
                val = val.replace(".0","")
            val = '\"' + val + '\"'
        elif self.colType[c] == 'FLOAT':
            try:
                if val == '':
                    val = '-1'
                val = float(val)
                val = ("%.2f"%val)
            except Exception as e:
                log.Log("%s 文件第%d行 %d列使用 数据类型错误" % (self.fname1, r+1,c+1))
                print("%s 文件第%d行 %d列使用 数据类型错误" % (self.fname1, r+1,c+1))
        else :
            try:
                if val == '':
                    val = '-1'
                val = int(val)
                val = ("%d"%val)
            except Exception as e:
                log.Log("%s 文件第%d行 %d列使用 数据类型错误" % (self.fname1, r+1,c+1))
                print("%s 文件第%d行 %d列使用 数据类型错误" % (self.fname1, r+1,c+1))
        return val
Example #3
import configparser
from email.message import EmailMessage
import os
import smtplib
import sys

import log

config = configparser.ConfigParser()
config.read(os.path.join(sys.path[0], "config.ini"))
SMTP_CONFIG = config["SMTP"]

log = log.Log()


def send_email(to_addr, subject, content, debug=True):
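    # The sender login and address come from environment variables; server host and port come from config.ini.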
    if not all(
        [x in os.environ for x in ["SMTP_LOGIN", "SMTP_PASS", "SMTP_FROM"]]):
        log.add("env variables missing")
        raise KeyError("missing environmental variables needed for SMTP!")

    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = os.environ.get("SMTP_FROM")
    msg["To"] = to_addr
    msg.set_content(content)

    try:
        if debug:
            server = smtplib.SMTP(SMTP_CONFIG["DEBUG_SERVER"],
                                  SMTP_CONFIG["DEBUG_PORT"])
Example #4
async def on_ready():
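    # When the bot is ready, log every visible member and start their point total at zero.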
    for usr in client.get_all_members():
        log.Log(usr.name)
        points[usr.name] = 0

    log.Log(str(len(points)) + ' users logged.')
Example #5
logpath = os.path.join(cf.Log_dir, "gini2nc")

channel_list = ncvdefs.sat_channels
proc_script = "gini2nc.py"

# -------------
# loop over the satellite list to process both east and west
# -------------
for sat in xrange(len(cf.Satellite_list)):
    out_dir = os.path.join(out_base_dir, cf.Satellite_list[sat])
    sat_in_dir = os.path.join(in_base_path, cf.Satellite_list[sat])
    # -------------
    # loop over the channel directories
    # -------------
    for dir in xrange(len(channel_list)):
        curr_in_dir = os.path.join(sat_in_dir, channel_list[dir])
        command = "%s -l %s %s %s %s %s %s %s" % (
            proc_script, logpath, curr_in_dir, out_dir, cf.Gini_cdl_list[sat],
            params_dir, channel_list[dir], cf.Satellite_list[sat])

        #print "command = %s " % command
        if options.test:
            print command
        else:
            logg = log.Log(logpath, ncvdefs.PYL_SUFFIX)
            logg.write_time("Running: %s\n" % command)
            logg.close()
            ret = os.system(command)

# -------------------------
Example #6
 def __init__(self):
     self.name = "Petcaugh"
     self.logfile = l.Log(self.name + ".txt")
     self.shouldILog = True
Example #7
rawfiles = ""
if os.path.exists(curfile):
    rawfiles = curfile

prev_tup = time.gmtime(ptime - 3600)
prevfile = os.path.join(indir, time.strftime("%Y%m%d_%H.metar", prev_tup))

if os.path.exists(prevfile):
    rawfiles = rawfiles + " " + prevfile

if rawfiles == "":
    if options.test:
        print "No recent input files found."
    else:
        logg = log.Log(os.path.join(logpath, "metar2nc"), "pyl")
        logg.write_time("Error: No recent input files found.\n")

    sys.exit(1)

# Create output filename if using the -f option
fname = time.strftime("metar.%Y%m%d.%H%M.nc", pres_tup)
dated_dir = time.strftime("%Y%m%d", pres_tup)
outdir = os.path.join(cf.Raw_metar_netcdf_dir, dated_dir)
outfile = os.path.join(outdir, fname)
tmpfile = os.path.join(tmpdir, fname)

if not os.path.isdir(outdir):
    os.makedirs(outdir)

# Use this form to decode to output files with names chosen by the decoder
Example #8
import random
import re
import socket
import time
import urllib2

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

from ms_constants import WEB_DRIVER_CHROME, WEB_DRIVER_PHANTOMJS, PHANTOMJS, \
    CHROME, HEADER_CHROME_1, HEADERS, PHANTOMJS_LOG
from ms_exceptions import *
import log


LOG = log.Log()

dcap = dict(DesiredCapabilities.PHANTOMJS)
DEFAULT_TIMEOUT = 30


def get_html_content(url, url_log=True):
    # if isinstance(PROXY_HOST, str) and isinstance(PROXY_PORT, int):
    #     proxy_info = {'host': PROXY_HOST, 'port': PROXY_PORT}
    #     print proxy_info
    #     proxy_support = urllib2.ProxyHandler(
    #         {'http': 'http://%(host)s:%(port)d' % proxy_info})
    #     print {'http': 'http://%(host)s:%(port)d' % proxy_info}
    #     opener = urllib2.build_opener(proxy_support)
    #     urllib2.install_opener(opener)
    url = str(url).strip()
Example #9
import copy
import pickle
import random
from equippables.items.modules import *
from equippables.items.weapons import *
from equippables.items.cargo import *
import itemManipulation
import log  # provides log.Log used below
import myFunctions
from customFormat import *
from init import *
import commands
import menus
import story
import gamemanager as gm

logger = log.Log("StarTheoryLogs")


class Stats(object):

    def __init__(self, hitpoints=500, evasion=100, hull=100, shield=100):

        # Defenses
        self.hitpoints = hitpoints
        self.evasion = evasion
        self.hull = hull
        self.shield = shield
        self.energy = 0

        self.name = "No-Name"
        self.level = 1
Example #10
    if 'dbase' not in options: options['dbase'] = 'chemtrayzer.sqlite'
    if 'files' not in options: options['files'] = 'tmp_'
    if 'type' not in options: options['type'] = 'log'
    if 'fail' not in options: options['fail'] = 'keep'
    if 'norm' not in options: options['norm'] = 'keep'

    # . defaults: master
    if 'all' not in options: options['all'] = False
    if 'pressure' not in options: options['pressure'] = 1E5

    return options


#######################################
if __name__ == '__main__':
    log = Log.Log(Width=70)

    ###	HEADER
    #
    text = [
        '<.reac-file(s)> [-main <main>] [-step <timestep>] [-start <start>] [-end <end>]',
        '-source <QM folder> -dbase <database> [-files <filename>] [-type <file extension>] [-fail <behavior on failure>] [-norm <behavior on success>]',
        '[-all <kinetic model size>] [-pressure <pressure/Pa>]', '',
        'analyzing options:',
        '-reac: name of .reac file produced via processing.py / simulation.py',
        '-main: name for output of analyzing.py (default="Default")',
        '-step: give the timestep of the simulation in [fs] (default=0.1)',
        '-start: skip timesteps smaller than <start> for rate computation (default=0)',
        '-end: end-flag for rate computation (default=-1)', '',
        'harvesting options:',
        '-source: name of folder containing the QM results (default="QM")',
Example #11
 def set_compatible_timestamps(self):
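     # If ':' is among the characters that must be quoted in file names, enable compatible timestamps on every connection.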
     if Globals.chars_to_quote.find(":") > -1:
         SetConnections.UpdateGlobal('use_compatible_timestamps', 1)
         Time.setcurtime(
             Time.curtime)  # update Time.curtimestr on all conns
         log.Log("Enabled use_compatible_timestamps", 4)
Example #12
import datetime
import re

import guard
import log


def part1():
    # Starting point
    log_entries = []
    guards = []
    guard_re = re.compile("([0-9]+)")

    # Open the file and read each line
    filename = "./day04/day04input.txt"
    #filename = "./day04/sample.txt"
    with open(filename, "r") as infile:
        for line in infile:
            # Skipping the first char, split the line based on the end of the timestamp
            parts = line[1:].split("] ")

            # Parse the timestamp, and create a new Log object using that timestamp
            date = datetime.datetime.strptime(parts[0], "%Y-%m-%d %H:%M")
            log_entries.append(log.Log(date, parts[1].strip()))

    # Sort the log entries so we can process them again
    log_entries.sort(key=lambda log: log.timestamp, reverse=False)

    # Now we can go through the log entries
    # We first look for a "Guard #" entry
    # After that, we look for a "falls asleep" entry, followed by a "wakes up"
    # We then process the guard entry with the time they were asleep

    current_entry = 0
    while current_entry < len(log_entries):
        match = guard_re.search(log_entries[current_entry].activity)
        if match:
            # Get the guard ID
            guard_id = int(match.group(0))
            current_entry += 1

            while current_entry < len(log_entries) and log_entries[
                    current_entry].activity.startswith("falls"):
                # Find out when they fell asleep and woke up
                sleep_time = log_entries[current_entry].timestamp.minute
                current_entry += 1
                wake_time = log_entries[current_entry].timestamp.minute
                current_entry += 1

                # Find the guard in the list - if he's not there, add him
                found_guard = False
                for i in range(len(guards)):
                    if guards[i].id == guard_id:
                        guards[i].set_sleep(sleep_time, wake_time)
                        found_guard = True
                        break
                if not found_guard:
                    new_guard = guard.Guard(guard_id)
                    new_guard.set_sleep(sleep_time, wake_time)
                    guards.append(new_guard)

    # Now we scan the list of guards and find the one with the highest sleep
    total_sleep = 0
    sleepy_guard = None

    for current_guard in guards:
        if current_guard.get_total_sleep() > total_sleep:
            total_sleep = current_guard.get_total_sleep()
            sleepy_guard = current_guard

    print(
        f"Part 1: Sleepy guard is {sleepy_guard.id}, likely sleeptime is {sleepy_guard.most_likely_sleeptime()}, product is {sleepy_guard.most_likely_sleeptime() * sleepy_guard.id}."
    )

    # Part 2 - which guard is most frequently asleep on the same minute
    sleepiest = 0
    sleepiest_time = 0
    sleepiest_guard = None
    for current_guard in guards:
        most_likely_sleeptime = current_guard.most_likely_sleeptime()
        if current_guard.sleepcount[most_likely_sleeptime] >= sleepiest:
            sleepiest_time = most_likely_sleeptime
            sleepiest = current_guard.sleepcount[most_likely_sleeptime]
            sleepiest_guard = current_guard

    print(
        f"Part 2: Sleepy guard is {sleepiest_guard.id}, sleepiest is {sleepiest_time}, product is {sleepiest_guard.id * sleepiest_time}."
    )
Example #13
def main():

    # General variables...
    Errores = 0
    miLog = log.Log()
    GestorSQLServer = ta_SQLServer.SQLServer

    miLog.Salidaln(
        "Welcome to the Sentinel Google Manager, starting services...")
    Lista = []

    try:
        # Create the SQL Server manager
        GestorSQLServer = ta_SQLServer.SQLServer()

    except Exception as e:
        Errores += 1
        miLog.Salidaln("ERROR generando Gestor de SQL Server...")
        miLog.Salidaln(e.args)
        return -1

    try:

        GestorSQLServer.ListaTweetsGoogle(Lista)

    except Exception as e:
        Errores += 1
        miLog.Salidaln("ERROR No se ha podido generar Sentinel de Google... ")
        miLog.Salidaln(e.args)

    miLog.Salidaln("OK.")

    # Instantiate the Google client
    miLog.Salida("Instantiating Google language... ")

    miLog.Salidaln("OK")
    miLog.Salidaln("Analizando Sentinel... " + str(len(Lista)) +
                   " elementos...")
    for Elemento in Lista:
        try:
            language_client = language.Client()
            #miLog.Salida("C")
            CadenaLimpia = Elemento.m_Texto
            CadenaLimpia = CadenaLimpia.replace("'", '-')
            CadenaLimpia = CadenaLimpia.replace('"', '-')

            #miLog.Salida("\bG")
            documentGoogle = language_client.document_from_text(CadenaLimpia)
            #miLog.Salida("\bS")
            sentimentGoogle = documentGoogle.analyze_sentiment().sentiment

            #Elemento.m_SentinelGoogle = sentimentGoogle.score
            #Elemento.m_AccuracyGoogle = sentimentGoogle.magnitude
            GestorSQLServer.ActualizaSentinelGoogleTweet(
                Elemento.m_idTweet, sentimentGoogle.score,
                sentimentGoogle.magnitude)

        except:
            miLog.Salida("E")
            GestorSQLServer.ActualizaSentinelGoogleTweet(
                Elemento.m_idTweet, 0, 0)

    miLog.Salidaln("OK.")

    #GestorSQLServer.ActualizaSentinelGoogle(Lista)

    GestorSQLServer.m_conSQL.close()

    if (Errores > 0):
        miLog.Salidaln("ERRORS DETECTED")
    else:
        miLog.Salidaln("Process finished successfully...")
Example #14
#!/usr/bin/python3

import log
myLog = log.Log("plp.log")
myLog.add("PLP start up checks beginning")

import plpHelper
plpHelper.startupChecks(myLog)

myLog.add("PLP start up checks passed")

# imports
import pauser
import decider

tv = pauser.Pauser()
myDecider = decider.Decider()

myLog.add("PLP ready...starting to listen")

while True:  # check for a noise

    if myDecider.isNoise():  # there is noise

        if not tv.isPaused():  # not paused, so go ahead and pause
            tv.pause()

    else:  #there is NOT noise now

        if tv.isPaused():  # then restart the TV
            tv.play()
Example #15
 def __init__(self):
     self.name = "Standard"
     self.logfile = l.Log(self.name + ".txt")
     self.shouldILog = True
Example #16
 def __init__(self, parent=None):
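     # Use a Log widget as the dialog's main content and call self._closed when the dialog finishes.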
     super(ExternalCommandDialog, self).__init__(parent)
     self.log = log.Log(self)
     self.setMainWidget(self.log)
     self.finished.connect(self._closed)
Example #17
 def __init__(self):
     self.name = "Miguel"
     self.logfile = l.Log(self.name + ".txt")
     self.shouldILog = False
     self.print = True  # Testing
Example #18
 def __init__(self, config, node):
     self.log = log.Log(config, node)
     #self.log.registerReceive(self.receive)
     self.node = node
Example #19
 def __init__(self):
     self.name = "Chris Bot"
     self.logfile = l.Log(self.name + ".txt")
     self.shouldILog = True
Example #20
def single_training_process(algorithm, dataset, fold=10):

    # ----------------- Splitting the source .csv data ------------------------

    # Split the data stored in the single .csv file into two files, ~_x.csv and ~_y.csv
    # path_y_label = '../datasets/corel16k/corel16k001.xml'
    # path_dataset = '../datasets/corel16k/corel16k001.csv'
    # rf.divide_csv(path_dataset, rf.read_labels_from_xml(path_y_label))

    # --------------------- Data source: .mat files ------------------------
    # Read the dataset stored in the .mat files
    # x_tr, y_tr = rf.read_mat('../datasets/CAL500_train.mat')
    # x_te, y_te = rf.read_mat('../datasets/CAL500_test.mat')
    # x_tr = np.array(x_tr)
    # y_tr = np.array(y_tr)
    # x_te = np.array(x_te)
    # y_te = np.array(y_te)

    # ---------------------- Create the log file --------------------------
    l = log.Log('default-%s-%s' % (algorithm, dataset))

    # --------------------- Data source: .csv files ------------------------
    # Read the inputs, gold-standard outputs, and their labels from the _x.csv and _y.csv files

    # log checkpoint
    behave = 'start reading dataset'
    l.write_format_log(behave, dataset, algorithm)

    path_x = '../datasets/%s/%s_x.csv' % (dataset, dataset)
    path_y = '../datasets/%s/%s_y.csv' % (dataset, dataset)
    x, x_label = rf.read_csv(path_x)
    y, y_label = rf.read_csv(path_y)

    # log checkpoint
    behave = 'finish reading dataset'
    l.write_format_log(behave, dataset, algorithm)

    # Randomly split into "fold" folds for cross-validation; indices of each fold's test set

    m = int(math.floor(np.size(x, axis=0)/fold))
    indices = np.reshape(random.sample(range(m*fold), m*fold), [fold, m])

    # Fold k: pick the training, test, and validation sets
    for k in range(fold):

        # log checkpoint
        behave = 'pre-processing'
        l.write_format_log(behave, dataset, algorithm, fold=k)

        v = 4   # 3+1: three parts fix the optimization order, one fixes the stopping point
        group_tr = list(range(fold))
        group_tr.remove(k)
        group_te = k
        group_va = [(k + e) % fold for e in range(1, v+1)]

        indice_tr = np.reshape(indices[group_tr], [m*(fold-1)])
        indice_te = np.reshape(indices[group_te], [m])
        indice_va = np.reshape(indices[group_va], [m*v])

        x_tr = x[indice_tr]
        y_tr = y[indice_tr]
        x_te = x[indice_te]
        y_te = y[indice_te]
        x_va = x[indice_va]
        y_va = y[indice_va]

    # -------------------------------- Base algorithm to use ------------------------------------
        # Call the chosen algorithm here: it takes the training data x_tr, y_tr and the test inputs x_te,
        # and returns the predictions y_
        # y_ = function(x_tr, y_tr, x_te)
        # Note: y_te must NOT be passed into the algorithm!

        # log checkpoint
        behave = 'original training'
        l.write_format_log(behave, dataset, algorithm, fold=k)

        y_te_, y_va_ = pred.predictor(algorithm, x_tr, y_tr, x_te, x_va)
    # -------------------------------- Base algorithm to use ------------------------------------

        # Refine the predictions with the appendix (post-processing) algorithm
        param = {
            'fold': k,
            'algorithm': algorithm,
            'dataset': dataset,
            'labels': y_label
            }

        # log checkpoint
        behave = 'appendix training'
        l.write_format_log(behave, dataset, algorithm, fold=k)

        ap.regression_method(y_tr, y_va, y_va_, y_te, y_te_, param)
Example #21
  if (len(args) < 5):
      print "Error: incorrect number of arguments"
      usage(sys.argv[0])
      sys.exit(2)
  
  # Get the command line args
  input_dir = args[0]
  mdl_base = args[1]
  site_list = args[2]
  cdl_file = args[3]
  output_dir = args[4]
  exec_name = "decoded2modfcst"
  
  # Set up log file
  if log_base:
      logf = log.Log(log_base)
      logf.set_suffix(sys_path.LOG_PY_SUFFIX)
      log_str = "-l %s" % log_base
  else:
      logf = log.Log("")
      log_str = ""
 
  logf.write_time("Starting.\n")
  
  # Set up strings for user-supplied date and real-time
  if utime:
      curr_date = utime[:8]
      curr_time = utime[9:]
  else:
      curr_date = time.strftime("%Y%m%d", time.gmtime(time.time()))
      curr_time = time.strftime("%H%M", time.gmtime(time.time()))
Example #22
def test_Log_valid():
    assert log.Log().save(entry) == "Database found."
Example #23
 def __init__(self):
     self.log = log.Log()
     pass
Example #24
    def __init__(self, logs_dir, interface):
        # Definition variables
        self.plugin_folder = "%s\\plugins\\server_minecraft\\" % (
            os.path.dirname(os.path.abspath(__file__)))
        self.config_file = '%sconfig.txt' % (self.plugin_folder)
        # Start the logging service
        self.Log_ex = log.Log("Server Minecraft", logs_dir)
        self.Log_ex.append("Plugin loaded", "info")
        print("[server_Minecraft]start Plugin")

        self.configuration = configuration.Config(
            "%s\\plugins\\" % (os.path.dirname(os.path.abspath(__file__))),
            "server_minecraft")
        self.interface = interface

        # Check the config service
        # Check the folder
        if not self.configuration.read_value():
            self.start_pl = False
            self.Log_ex.append("No config file : plugin will not start.",
                               "warn")
            self.configuration.append("server_name: exemple")
            self.configuration.append("ip: 0.0.0.0")
            self.configuration.append("port: 1234")
            self.configuration.append("pass: 0123456789")
        #Create value in the config file if missing
        if "server_name" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("server_name: exemple")
            self.Log_ex.append(
                "Config file found but argument missing, creating.", "warn")
        if "ip" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("ip: 0.0.0.0")
            self.Log_ex.append(
                "Config file found but argument missing, creating.", "warn")
        if "port" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("port: 1234")
            self.Log_ex.append(
                "Config file found but argument missing, creating.", "warn")
        if "pass" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("pass: 0123456789")
            self.Log_ex.append(
                "Config file found but argument missing, creating.", "warn")
        #Else we start
        else:
            self.start_pl = True
            self.ip_port = "%s:%s" % (self.configuration.read_value()['ip'],
                                      self.configuration.read_value()['port'])
            print("[server_Minecraft] Config file, plugin launch")
            print(
                "[server_Minecraft] Start the plugin with the server on : %s" %
                (self.ip_port))
            self.Log_ex.append("Config file found", "info")

            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.result = self.sock.connect_ex(
                (self.configuration.read_value()['ip'],
                 int(self.configuration.read_value()['port'])))

            if self.result == 0:
                self.start_pl = True
                print("[server_Minecraft]Your server is ok...")
            else:
                self.start_pl = False
                print(
                    "[server_Minecraft]Your server is broken or the port is not open..."
                )
        schedule.every().minute.do(self.job_server_minecraft)
Example #25
    def check(self):
        if self.frows < 5:
            log.Log("%s 文件行数小于5"%self.fname1)
            return False
        if len(self.fname1.split("_")) < 2:
            log.Log("%s 文件名格式错误" % self.fname1)
            return False
        for c in range(self.fcols):
            # all field names
            str = self.table.cell(2,c).value
            if str in self.allKey:
                log.Log("%s 文件第%d列有重复的字段名"%(self.fname1, c+1))
                continue
            self.allKey.append(str)
        # read the validation rules
        for i in range(self.fcols):
            str = self.table.cell(1,i).value
            try:
                items = {}

                max = self.getIntItem(r'<max=(.*?)>', str, i)
                #print(max)

                min = self.getIntItem(r'<min=(.*?)>', str, i)
                #print(min)

                defa = self.getStrItem(r'<def=(.*?)>', str, i)
                #print(defa)

                leng = self.getIntItem(r'<len=(.*?)>', str, i)
                #print(leng)

                set = self.getStrItem(r'<set=(.*?)>', str, i)
                if set:
                    set = set.split(',')
                #print(set)

                chk = self.getStrItem(r'<chk=(.*?)>', str, i)
                if chk:
                    chk = chk.split(',')
                #print(chk)

                svr = self.searchItem(r'<svr>', str)
                #print(svr)

                des = self.searchItem(r'<des>', str)
                #print(des)

                i18n = self.searchItem(r'<i18n>', str)
                #print(i18n)

                items['max'] = max
                items['min'] = min
                items['def'] = defa
                items['set'] = set
                items['chk'] = chk
                items['svr'] = svr
                items['des'] = des
                items['len'] = leng
                items['i18n'] = i18n

                self.checkItems[i]=items
                #print(self.checkItems[i])
            except:
                log.Log("%s 文件第2行 %d列有不符合规则的字符串"%(self.fname1, i+1))
                print("##except")
        # read the data types
        for i in range(self.fcols):
            str = self.table.cell(0,i).value
            #print(str)
            if str == 'INT':
                self.colType[i] = 'INT'
            elif str == 'FLOAT':
                self.colType[i] = 'FLOAT'
            elif str == 'STRING':
                self.colType[i] = 'STRING'
            elif str == 'LS':
                self.colType[i] = 'LS'
                #print 'LS'
            elif str == 'LI':
                self.colType[i] = 'LI'
                #print 'LI'
            elif str == 'LF':
                self.colType[i] = 'LF'
                #print 'LF'
            else:
                self.colType[i] = None
                log.Log("%s 文件第%d行 %d列有不符合规则的数据类型" % (self.fname1, 1,i+1))
        #检查规则, 第五行开始
        for i in range(5,self.frows):
            for j in range(self.fcols):
                if not self.checkTableItem(i,j):
                    log.Log("%s 文件第%d行 %d列有不符合规则的数值" % (self.fname1, i+1,j+1))
        return True
Example #26
        '-v',
        dest='verbose',
        type=int,
        default=3,
        help='Verbose level from 0 to 4')
    return parser.parse_args()

args = parse_args()
shutdown = False
signal.signal(signal.SIGINT, signal_handler)

# Set log stuff
Log.verbose = args.verbose
Log.filename = args.log
Log.stdout = not args.quiet
log = Log.Log('main')

# Share statistics module
shares = share_stats.Shares()

# Start proxy cleaner thread
proxies = Proxy.ProxyDB()
t = threading.Thread(target=proxies.cleaner, args=[])
t.daemon = True
t.start()

# Set and start control thread
controller = control.Control(proxydb=proxies, sharestats=shares)
controller.listen_ip = args.control
controller.listen_port = args.control_port
controller.poolmap['pool'] = args.pool
Example #27
    def saveLua(self, path):
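        # Write the sheet out as a Lua module: one table entry per data row, keyed by the first column.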
        name = os.path.split(self.fname)[1]
        if len(name.split("_")) < 2:
            log.Log("%s 文件名格式错误" % self.fname1)
        tableNmae = name.split("_")[0]
        lua = "%s/%s.lua"%(path,name.split("_")[0])
        print lua
        if not os.path.exists(os.path.dirname(lua)):
            os.makedirs(os.path.dirname(lua))
        outfile = open(lua,"wb")
        #outfile.write(codecs.BOM_UTF8)

        if self.isLanguage == True:
            #outfile.write("require('Common/ConstDefine')\r\n")
            outfile.write("local ConstDefine = ConstDefine\r\n")
        outfile.write("local log = Log.Create('%s')\r\n" % (tableNmae))
        outfile.write("module(...)\r\n")
        outfile.write("%s={}\r\n"%(tableNmae))

        outfile.write( "\r\n" )
        outfile.write( "\r\n" )

        outfile.write( "function Get(id)\r\n" )
        outfile.write( "\tlocal data = %s[id]\r\n"%(tableNmae) )
        outfile.write( "\tif data ~= nil then\r\n" )
        if self.isLanguage == True:
            outfile.write( "\t\treturn data[ConstDefine.C_LANGUAGE]\r\n" )
        else:
            outfile.write( "\t\treturn data\r\n" )
        outfile.write( "\telse\r\n" )
        #outfile.write( "\t\tprint( '不存在ID => ' .. id)\r\n" )
        outfile.write( "\t\tlog:Error( '不存在ID => ' , id)\r\n" )
        outfile.write( "\tend\r\n" )
        outfile.write( "end\r\n" )

        outfile.write( "\r\n" )
        outfile.write( "\r\n" )

        # One Lua entry per data row; the first five rows are header/metadata and skipped rows are ignored.
        for r in range(self.frows):
            if r < 5:
                continue
            if self.skipRow(r):
                continue
            val = self.getCellData( r, 0 )

            key = val

            fileLine = "{"

            for c in range(self.fcols):
                if self.skipClient(c):
                    continue
                Name = self.table.cell(2,c).value
                if isinstance(Name, unicode):
                    Name = Name.encode('utf-8')
                val = self.getCellDataLua( r, c )
                #line ='"%s"=%s'%(Name,val)
                line ='%s=%s'%(Name,val)
                if c < self.fcols-1:
                    line += ","
                if isinstance(line, unicode):
                    line = line.encode('utf-8')
                fileLine += line

            fileLine += '}'
            fileLine += "\r\n"
            outfile.write("%s[%s] = %s" % (tableNmae,key,fileLine))
            #outfile.write(fileLine)
        outfile.close()
Example #28
 def __init__(self):
     self.name = "Bot slayer"
     self.logfile = l.Log(self.name + ".txt")
     self.shouldILog = True
Example #29
# -*- coding:utf-8 -*-
import sys, json
import dbs
import urllib.parse
import urllib.request
import os, time
sys.path.append("logger_python")
import log

LIST_NAME = "links"

logger = log.Log("sWang", "logger_python/logger.conf").logger


def submit(name, link):
    task = [{
        "name": name,
        "data": {
            "link": link
        },
        "retry": 3,
        "savefile": "true"
    }]
    data = {"tasks": json.dumps(task)}
    tmpData = urllib.parse.urlencode(data)
    f = urllib.request.urlopen(url='http://127.0.0.1:8097/submit',
                               data=tmpData.encode(encoding='utf-8',
                                                   errors='ignore'))
    ret = f.read().decode('utf-8').strip()
    logger.info(ret)
    return ret
Example #30
 def __init__(self, logging_dir):
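     # Create a per-server log directory under logging_dir and open a Log there for this thread.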
     super().__init__(target=self.server_handler)
     self.should_stop = False
     self.log_name = os.path.join(logging_dir, "server")
     os.makedirs(self.log_name, exist_ok=True)
     self.log = log.Log(self.log_name)