import os
from multiprocessing import Process, Queue, Lock
import requests
import time

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'}
proc_num = 20
queue_size = 2048
url_set = set()
urls_queue = Queue(queue_size)
queue_lock = Lock()

save_path = "./fec_data"

def producer(src_file_path, urls_queue):
    with open(src_file_path, "r") as src_fr:
        index = 0
        ori_list = src_fr.readlines()
        for imgs_line in ori_list:
            for img_url in imgs_line.split(",")[0: 11: 5]:
                index = index + 1
                if 0 == index % 2000:
                    print("now process is %d / %d " % (index, len(ori_list)))
                if img_url in url_set:
                    continue
                else:
                    url_set.add(img_url)
                    while urls_queue.full():
                        time.sleep(1)
                    with queue_lock:
                        urls_queue.put(img_url)
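
# A possible consumer counterpart (a minimal sketch; the original consumer is not shown):
# each worker process drains urls_queue and downloads one image at a time with requests.
# The download_worker name and the empty-string sentinel below are assumptions for illustration.
def download_worker(urls_queue, save_path):
    os.makedirs(save_path, exist_ok=True)
    while True:
        img_url = urls_queue.get()
        if img_url == "":  # assumed sentinel marking the end of the work
            break
        try:
            resp = requests.get(img_url, headers=headers, timeout=10)
            file_name = os.path.join(save_path, img_url.split("/")[-1])
            with open(file_name, "wb") as img_fw:
                img_fw.write(resp.content)
        except requests.RequestException:
            continue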
Example #2
        except ClosedPoolError as e:
            #logger.fatal("Closed Pool Error exception posting to %s %r %r [will retry]\n"%(url,e,postdata))
            logcache.put(postdata)
        except Exception as e:
            logger.fatal("exception posting to %s %r %r [will not retry]\n"%(url,e,postdata))
            sys.exit(1)

if __name__ == '__main__':
    parser=OptionParser()
    parser.add_option("-u", dest='url' , default='http://localhost:8080/events/', help="mozdef events URL to use when posting events")
    (options,args) = parser.parse_args()
    sh=logging.StreamHandler(sys.stdout)
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    #create a list of logs we can append json to and call for a post when we want.
    logcache=Queue()
    try:
        for i in range(0,10):

            print(i)
            alog=dict(eventtime=pytz.timezone('UTC').localize(datetime.now()).isoformat(),\
                        hostname=socket.gethostname(),\
                        processid=os.getpid(),\
                        processname=sys.argv[0],\
                        severity='INFO',\
                        summary='joe login failed',\
                        category='authentication',\
                        tags=[],\
                        details=[])
            alog['details']=dict(success=True,username='******')
            alog['tags']=['mozdef','stresstest']
Example #3
File: teemo.py Project: lycansfree/Teemo
def main():
    args = parse_args()
    domain = args.domain
    #threads = args.threads
    savefile = args.output
    ports = args.ports
    bruteforce_list = []
    subdomains = []

    if not savefile:
        now = datetime.datetime.now()
        timestr = now.strftime("-%Y-%m-%d-%H-%M")
        savefile = domain + timestr + ".txt"

    enable_bruteforce = args.bruteforce
    if enable_bruteforce or enable_bruteforce is None:
        enable_bruteforce = True

    #Validate domain
    if not is_domain(domain):
        print R + "Error: Please enter a valid domain" + W
        sys.exit()

    #Print the Banner
    banner()
    warning = "[!] legal disclaimer: Usage of Teemo for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program"
    print warning
    print B + "[-] Enumerating subdomains now for %s" % domain + W
    '''
    subdomains.extend(callsites(domain,proxy))
    domains,emails = callengines(domain,500,proxy)
    subdomains.extend(domains)
    #print subdomains
    '''

    #do zone transfer check
    # to do here

    Threadlist = []
    q_domains = Queue()  # to receive return values
    q_emails = Queue()
    useragent = random_useragent(allow_random_useragent)

    if args.proxy != None:
        proxy = args.proxy
        proxy = {args.proxy.split(":")[0]: proxy}
    elif default_proxies != None and (proxy_switch == 2
                                      or proxy_switch == 1):  #config.py
        proxy = default_proxies
        try:
            sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sk.settimeout(2)
            ip = default_proxies['http'].split("/")[-2].split(":")[0]
            port = default_proxies['http'].split("/")[-2].split(":")[1]
            sk.connect((ip, int(port)))
            sk.close()
        except:
            print "\r\n[!!!]Proxy Test Failed,Please Check!\r\n"
            proxy = {}
    else:
        proxy = {}

    for engine in [
            Alexa, Chaxunla, CrtSearch, DNSdumpster, Googlect, Ilink, Netcraft,
            PassiveDNS, Pgpsearch, Sitedossier, ThreatCrowd, Threatminer
    ]:
        #print callsites_thread(engine,domain,proxy)
        t = threading.Thread(target=callsites_thread,
                             args=(engine, domain, q_domains, q_emails, proxy))
        Threadlist.append(t)
    for engine in [
            search_ask, search_baidu, search_bing, search_bing_api,
            search_dogpile, search_duckduckgo, search_exalead, search_fofa,
            search_google, search_google_cse, search_shodan, search_so,
            search_yahoo, search_yandex
    ]:
        if proxy_switch == 1 and engine in proxy_default_enabled:
            pass
        else:
            proxy = {}
        t = threading.Thread(target=callengines_thread,
                             args=(engine, domain, q_domains, q_emails,
                                   useragent, proxy, 500))
        t.setDaemon(True)
        Threadlist.append(t)
    #for t in Threadlist:
    #    print t
    for t in Threadlist:  # use start() not run()
        t.start()
    for t in Threadlist:
        t.join()

    while not q_domains.empty():
        subdomains.append(q_domains.get())
    emails = []
    while not q_emails.empty():
        emails.append(q_emails.get())

    if enable_bruteforce:
        print G + "[-] Starting bruteforce module now using subbrute.." + W
        record_type = False
        path_to_file = os.path.dirname(os.path.realpath(__file__))
        subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
        resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
        process_count = 10
        output = False
        json_output = False
        bruteforce_list = subbrute.print_target(domain, record_type, subs,
                                                resolvers, process_count,
                                                output, json_output,
                                                subdomains)
        subdomains.extend(bruteforce_list)

    if subdomains is not None:
        subdomains = sorted(list(set(subdomains)))
        emails = sorted(list(set(emails)))
        subdomains.extend(
            emails
        )  # extend() returns None, so its result can't be used directly in an expression
        #print type(subdomains)

        #write_file(savefile, subdomains)

        if ports:
            print G + "[-] Start port scan now for the following ports: %s%s" % (
                Y, ports) + W
            ports = ports.split(',')  #list
            pscan = portscan(subdomains, ports)
            pscan.run()

        else:
            for subdomain in subdomains:
                print G + subdomain + W

    print "[+] {0} domains found in total".format(len(subdomains))
    print "[+] {0} emails found in total".format(len(emails))
    print "[+] Results saved to {0}".format(write_file(savefile, subdomains))
Example #4
import os
import json
from time import sleep
import requests
from flask import Flask, request, jsonify
from pprint import pprint
from multiprocessing import Manager, Queue
from threading import Thread
from uuid import uuid4

####################################################################################################
# Globals
####################################################################################################
app = Flask(__name__)
manager = Manager()
responses_results = manager.dict()
queue = Queue()

ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))

####################################################################################################
# Routes
####################################################################################################


@app.route('/v1/objects/hosts/<host>', methods=["PUT"])
def create_host(host):
    sleep(1)
    return jsonify({"results": ["paragasessa"]})


@app.route('/v1/objects/services/<host>!<service>', methods=["PUT"])
Example #5
File: io.py Project: prabindh/bonito
    def __init__(self, directory, maxsize=5):
        super().__init__()
        self.directory = directory
        self.queue = Queue(maxsize)
Example #6
    if OperSys == 'Windows':
        TargetDir = '\\\\INSTRUCTOR\\Shared\\' + Host + '\\'
        #TargetDir = '\\\\BUTTON\\Shared\\' + Host + '\\'
    else:
        TargetDir = '/mnt/hgfs/\\\\INSTRUCTOR/' + Host + '/'
   
    if not os.path.isdir(TargetDir):
        os.mkdir(TargetDir)

    start_timer()    
    NumProcs = 2

    for i in range(0,10):
        print 'Loop',i
        queue  = Queue()
        rqueue = Queue()

        Pids = []

        for j in range(0, NumProcs):
            Pids.append(Process(target=WorkerProcess, args=(TargetDir, queue, rqueue)))
    
        rth = Process(target=RemoveProcess, args=(TargetDir,rqueue))

        for th in Pids:
            th.start()

        rth.start()
    
        for FName in glob.iglob(Source):    
Example #7
    def doAllPossibleActions(self):
        if not self.fileOutputDir:
            # Get a file output location
            self.setOutputDir()
            if self.fileOutputDir is None or not self.fileOutputDir:
                msg = QMessageBox()
                msg.setWindowTitle("Error")
                msg.setText("No Output Directory Selected!")
                msg.exec()
                self.updateText("No actions selected!")
                return None
        p = 0

        r = re.compile("[\\:\\w&.\\-\\/]+MyVideos\\d+.db$")
        videoDatabase = list(filter(r.match, self.databases))[0]
        if self.allOptions['option1'].isChecked():
            # If a current job is running, kill it!
            if self.option1Timer is not None and self.option1Timer.isActive():
                self.option1Timer.stop()
            if self.activeProcess is not None and self.activeProcess.is_alive(
            ):
                self.updateText("Killing active task...")
                self.activeProcess.terminate()
                self.updateText("Success! Starting new requested tasks...")
            # Establish a connection to the movie database
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # Get the movie name, movie path, movie filename
            cursor.execute("""SELECT DISTINCT strFilename,strPath FROM files
                            INNER JOIN movie
                            ON files.idFile = movie.idFile
                            INNER JOIN path 
                            ON files.idPath = path.idPath;""")
            allKnownMovies = cursor.fetchall()
            self.updateText("Found {} files in Kodi...".format(
                len(allKnownMovies)))
            connection.close()
            p += 1
            mediaDirectory = QFileDialog.getExistingDirectory(
                self, "Select Media Directory To Scan")
            if mediaDirectory == "":
                self.updateText(
                    "Media directory cannot be ignored for option 1! Skipping..."
                )
            else:
                # Definitely broken!
                # Start a thread safe queue
                self.queue = Queue()
                # Setup a timer
                self.option1Timer = QTimer(self)
                self.option1Timer.setInterval(1000)
                self.option1Timer.timeout.connect(
                    self.updateMovieDirectoriesScanned)
                # Spin up a process
                self.activeProcess = VideoWorker(
                    self.queue, mediaDirectory, allKnownMovies,
                    self.fileOutputDir + "/Missing Movies.csv")
                # Start a timer for checking and updating the files found marker
                self.activeProcess.start()
                self.option1Timer.start()

        if self.allOptions['option2'].isChecked():
            # Establish a connection to the movie database
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # Get the movie name, movie path, movie filename
            cursor.execute("SELECT DISTINCT c00 FROM movie;")
            allKnownMovies = sorted(cursor.fetchall())
            allKnownMovies = list(
                map(lambda nameTup: '"{}"'.format(nameTup[0]), allKnownMovies))
            # Point out possible duplicates?
            cursor.execute("""SELECT movie.c00,cnt,strPath FROM movie
                            INNER JOIN files
                            ON files.idFile = movie.idFile
                            INNER JOIN path
                            ON path.idPath = files.idPath
                            LEFT JOIN (
                            SELECT c00, COUNT(c00) AS cnt FROM movie 
                            GROUP BY c00
                            ) AS temptable
                            ON movie.c00 = temptable.c00
                            WHERE cnt > 1;""")
            duplicates = sorted(cursor.fetchall())
            duplicates = list(
                map(
                    lambda entry: ",".join(
                        list(map(lambda item: '"{}"'.format(item), entry))),
                    duplicates))
            self.updateText("Writing out a CSV of movie names from Kodi...")
            with open("{}/All Movies.csv".format(self.fileOutputDir),
                      "w") as outfile:
                outfile.write("Title\n")
                outfile.write("\n".join(allKnownMovies))
            self.updateText("{}/All Movies.csv".format(self.fileOutputDir))
            with open("{}/All Duplicate Movies.csv".format(self.fileOutputDir),
                      "w") as outfile:
                outfile.write("Title,Total,Path\n")
                outfile.write("\n".join(duplicates))
            self.updateText("{}/All Duplicate Movies.csv".format(
                self.fileOutputDir))
            # Get the movie name, movie path, movie filename
            p += 1
            # self.updateText("Found {} movies logged by Kodi...".format(numFound))
            # Write out the file
        if self.allOptions['option3'].isChecked():
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # cursor.execute("SELECT idMovie, idFile, c00 FROM movie")
            # allKnownShows = cursor.fetchall()
            connection.close()
            self.updateText("Option 3 is not yet available")
            p += 1
            # self.updateText("Found {} missing seasons".format(numMissing))
            # Write out the file

        if p == 0:
            msg = QMessageBox()
            msg.setWindowTitle("Error")
            msg.setText("No Actions Selected!")
            msg.exec()
            self.updateText("No actions selected!")
        return None
Example #8
                        dest='num_workers',
                        type=int,
                        default=2,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
Example #9
        updated_bottom_right_y = bottom_right_y + (int)(
            (bottom_right_y - top_left_y) * 0.05)
        single_face_coordination = [
            updated_top_left_x, updated_top_left_y, updated_bottom_right_x,
            updated_bottom_right_y
        ]
        face_list.append(single_face_coordination)
        cv2.rectangle(frame, (updated_top_left_x, updated_top_left_y),
                      (updated_bottom_right_x, updated_bottom_right_y),
                      (255, 255, 0), FACE_RECTANGLE_THICKNESS)
    return face_list


if __name__ == '__main__':
    #Queues used to communicate between the two processes
    q_video2algo = Queue()
    q_algo2video = Queue()
    video = Process(target=proc_video, args=(
        q_video2algo,
        q_algo2video,
    ))
    algorithm = Process(target=proc_algorithm,
                        args=(
                            q_video2algo,
                            q_algo2video,
                        ))

    #Start the two processes
    print('Info: Start thread to read video successfully.')
    video.start()
    print('Info: Start thread to recognize face successfully.')
Example #10
def confirmUserInput(userInputType, userInput, speechInput, speaker,
                     userInputInitialise):
    print 'in confirmUserInput function'
    proceedAhead = False
    revertPrevProc = True

    def inputType(userInputType):
        inputQuery = None
        if userInputType == 'confirmBuildingQuery':
            inputQuery = 'please confirm building name %s' % userInput
        elif userInputType == 'confirmLevelQuery':
            inputQuery = 'please confirm current level %s' % userInput
        elif userInputType == 'confirmBuildingDestQuery':
            inputQuery = 'please confirm destination building %s' % userInput
        elif userInputType == 'confirmLevelDestQuery':
            inputQuery = 'please confirm destination level %s' % userInput
        elif userInputType == 'confirmStartQuery':
            inputQuery = 'please confirm start location %s' % userInput
        elif userInputType == 'confirmDestQuery':
            inputQuery = 'please confirm destination location is %s' % userInput
        elif userInputType == 'confirmStartNavigation':
            inputQuery = 'please acknowledge to begin navigation guidance'
        else:
            pass

        return inputQuery

    needToRepeatInput = False
    while revertPrevProc:
        print 'in revertPrevProc loop'
        revertPrevProc = False
        while not proceedAhead:
            if needToRepeatInput:
                print 'pls repeat input'
                #userInput = raw_input()
                userInput = receiveUserInput('requestRepeatQuery', speechInput,
                                             speaker, userInputInitialise)
                userInput = stringParser(userInput)
                if (userInputType == 'confirmStartQuery'
                        or userInputType == 'confirmDestQuery'):
                    userInput = 'node %s' % userInput

            inputQuery = inputType(userInputType)
            speaking_proc = Process(target=speaker.threadedFeedback,
                                    args=(inputQuery, ))
            speaking_proc.start()
            speaking_proc.join()

            if speechInput is None:
                voiceInput, numpadData = userInputInitialise.location_input()
                if len(numpadData) == 0 or numpadData[0] == 2:
                    revertPrevProc = True
                    needToRepeatInput = True
                elif numpadData[0] == 1:
                    proceedAhead = True
                else:
                    pass
            else:
                inputAnswer = Queue()
                recog_proc = Process(target=speechInput.speechRecognise,
                                     args=(inputAnswer, ))
                recog_proc.start()
                recog_proc.join()
                print 'processing inputAnswer'
                testInputAnswer = inputAnswer.get()
                print 'inputAnswer is ', testInputAnswer
                if testInputAnswer == 'AFFIRMATIVE':
                    proceedAhead = True
                elif testInputAnswer == 'NEGATIVE':
                    print 'acknowledged as needToRepeatInput'
                    revertPrevProc = True
                    needToRepeatInput = True
                else:
                    print 'inputAnswer is neither affirmative nor negative'
                    pass
    #if userInput is None:

#	userInput = True
    print 'userInput is', userInput
    return userInput
Example #11
import time
from multiprocessing import Process, JoinableQueue, Queue
from random import random
from scipy import misc

batch_img_queue = JoinableQueue(maxsize=10)
midle_reulst_queue = JoinableQueue()
cls_queen = Queue()


def msg(flag, info):
    print flag, info


def pre_process(batch_img_queue):
    for i in range(10):
        msg('pre_process', 'prepare bat_img....')
        time.sleep(0.2)
        img = misc.imread('dog.jpg')
        batch_img_queue.put(img)
    batch_img_queue.put(None)


def batch_extractFeature(batch_img_queue, midle_reulst_queue):
    while 1:
        msg('batch_extractFeature', 'extracting feature.....')
        time.sleep(0.5)
        img_bat = batch_img_queue.get()
        if img_bat is None:
            midle_reulst_queue.put(None)
            break
Example #12
def enumerate_logs(limit=None, tws=None):
    """
    Calls _enumerate_logs() via a multiprocessing Process() so it doesn't cause
    the IOLoop to block.

    Log objects will be returned to the client one at a time by sending
    'logging_log' actions to the client over the WebSocket (*tws*).
    """
    # Sometimes IOLoop detects multiple events on the fd before we've finished
    # doing a get() from the queue.  This variable is used to ensure we don't
    # send the client duplicates:
    results = []
    user = tws.get_current_user()['upn']
    users_dir = os.path.join(tws.settings['user_dir'], user)  # "User's dir"
    io_loop = tornado.ioloop.IOLoop.instance()
    global PROCS
    if user not in PROCS:
        PROCS[user] = {}
    else:  # Cancel anything that's already running
        io_loop.remove_handler(PROCS[user]['queue']._reader.fileno())
        if PROCS[user]['process']:
            PROCS[user]['process'].terminate()
    PROCS[user]['queue'] = q = Queue()
    PROCS[user]['process'] = Process(target=_enumerate_logs,
                                     args=(q, user, users_dir, limit))

    def send_message(fd, event):
        """
        Sends the log enumeration result to the client.  Necessary because
        IOLoop doesn't pass anything other than *fd* and *event* when it handles
        file descriptor events.
        """
        message = q.get()
        if message == 'complete':
            io_loop.remove_handler(fd)
            logs_dir = os.path.join(users_dir, "logs")
            log_files = os.listdir(logs_dir)
            total_bytes = 0
            for log in log_files:
                log_path = os.path.join(logs_dir, log)
                total_bytes += os.stat(log_path).st_size
            out_dict = {
                'total_logs': len(log_files),
                'total_bytes': total_bytes
            }
            # This signals to the client that we're done
            message = {'logging_logs_complete': out_dict}
            tws.write_message(message)
            return
        message = json_encode(message)
        if message not in results:
            if results:
                results.pop()  # No need to keep old stuff hanging around
            results.append(message)
            tws.write_message(message)

    # This is kind of neat:  multiprocessing.Queue() instances have an
    # underlying fd that you can access via the _reader:
    io_loop.add_handler(q._reader.fileno(), send_message, io_loop.READ)
    # We tell the IOLoop to watch this fd to see if data is ready in the queue.
    PROCS[user]['process'].start()
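
# A self-contained sketch of the trick used above -- watching the pipe file descriptor
# behind a multiprocessing.Queue from a Tornado IOLoop -- assuming tornado is installed.
# The producer/on_readable names and the 'complete' sentinel are illustrative, not the
# project's API.
from multiprocessing import Process, Queue
import tornado.ioloop

def producer(q):
    for i in range(3):
        q.put({'item': i})
    q.put('complete')

if __name__ == '__main__':
    q = Queue()
    io_loop = tornado.ioloop.IOLoop.instance()

    def on_readable(fd, event):
        message = q.get()
        if message == 'complete':
            io_loop.remove_handler(fd)
            io_loop.stop()
        else:
            print(message)

    # The Queue exposes the read end of its pipe via _reader, so the IOLoop can watch it:
    io_loop.add_handler(q._reader.fileno(), on_readable, io_loop.READ)
    Process(target=producer, args=(q,)).start()
    io_loop.start()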
Example #13
    def testAlerting(self):

        # Adapt the user to an existing user
        username = '******'
        usr = user.User(username)
        testSite = website.Website(
            name='test',
            url='https://support.google.com/merchants/answer/160038?hl=en')
        usr.mySites['test'] = testSite
        print(usr)

        #Queues
        queueTwoMin = Queue()
        queueTenMin = Queue()
        queueHour = Queue()
        queueAlerts = Queue()

        # Triggers the monitoring
        testProcess = Process(target=monitor.startMonitor,
                              args=(usr, queueTwoMin, queueTenMin, queueHour,
                                    queueAlerts))
        testProcess.start()

        # Wait for some time
        print('Waiting a bit…')
        print(str(usr.mySites['test']))
        time.sleep(120)

        # End the process
        testProcess.terminate()

        # Put invalid url in the sites
        websitename = list(usr.mySites.keys())[0]
        usr.mySites[
            websitename].url = 'https://support.google.com/answer/160038?hl=en'
        print(str(usr.mySites[websitename]))

        # Retriggers the monitoring
        testProcess = Process(target=monitor.startMonitor,
                              args=(usr, queueTwoMin, queueTenMin, queueHour,
                                    queueAlerts))
        testProcess.start()

        # Wait for some time
        print('Waiting for the alert DOWN to come up')
        time.sleep(200)

        # End the process
        testProcess.terminate()

        # Get the Alert down - Make it possible to raise the up alert
        # Up alerts can't be raised if a down alert is not present in the alertDic
        alertDown = queueAlerts.get()
        testDic = {'test': alertDown}
        queueAlerts.put(alertDown)

        # Put valid url in the sites
        usr.mySites[
            websitename].url = 'https://support.google.com/merchants/answer/160038?hl=en'
        print(str(usr.mySites[websitename]))

        # Retriggers the monitoring
        testProcess = Process(target=monitor.startMonitor,
                              args=(usr, queueTwoMin, queueTenMin, queueHour,
                                    queueAlerts, testDic))
        testProcess.start()

        # Wait for some time
        print('Waiting for the alert UP to come up')
        time.sleep(300)

        # End the process
        testProcess.terminate()

        # Get all the alerts triggered
        alertsTriggered = []
        testAlerts = []
        while not queueAlerts.empty():
            alertsTriggered.insert(0, queueAlerts.get())

        # Get all the alerts tested, there may be some more
        for alert in alertsTriggered:
            if alert['website'] == websitename:
                testAlerts.append(alert)

        # Get the Status of the alerts tested
        statusTestedAlerts = []
        for alert in testAlerts:
            statusTestedAlerts.append(alert['status'])

        print(alertsTriggered)
        print(testAlerts)

        # Assertions - Only 2 alerts, one up one down
        self.assertEqual(len(testAlerts), 2)
        self.assertTrue('UP' in statusTestedAlerts)
        self.assertTrue('DOWN' in statusTestedAlerts)
Example #14
    def __init__(self,
                 core_queue=None,
                 gui_queue=None,
                 gui_id=None,
                 gui_data=None,
                 is_parent=False,
                 parent=None):
        """
        Constructor of this class. Called to initialize the GUI.

        This constructor is called within its own process, which is responsible for the GUI and all graphical plugins.

        :param core_queue: Queue used to send PaPI events to the Core
        :param gui_queue: GUI queue which contains PaPI events for the GUI
        :param gui_id: Unique ID for this gui
        :param gui_data: Contains all data for the current session
        :param parent: parent element
        :return:
        """

        super(GUI, self).__init__(parent)
        self.setupUi(self)
        self.is_parent = is_parent

        # Create a data structure for gui if it is missing
        # -------------------------------------------------- #
        if not isinstance(gui_data, DGui):
            self.gui_data = DGui()
        else:
            self.gui_data = gui_data

        # check if gui should be the parent process or core is the parent
        # start core if gui is parent
        # -------------------------------------------------- #
        self.core_process = None
        if is_parent:
            core_queue_ref = Queue()
            gui_queue_ref = Queue()
            gui_id_ref = 1
            self.core_process = Process(target=run_core_in_own_process,
                                        args=(gui_queue_ref, core_queue_ref,
                                              gui_id_ref))
            self.core_process.start()
        else:
            if core_queue is None:
                raise Exception('Gui started with wrong arguments')
            if gui_queue is None:
                raise Exception('Gui started with wrong arguments')
            if not isinstance(gui_id, str):
                raise Exception('Gui started with wrong arguments')

            core_queue_ref = core_queue
            gui_queue_ref = gui_queue
            gui_id_ref = gui_id

        # Create the Tab Manager and the gui management unit #
        # connect some signals of management to gui          #
        # -------------------------------------------------- #
        self.TabManager = PaPITabManger(tabWigdet=self.widgetTabs,
                                        centralWidget=self.centralwidget)

        self.gui_management = GuiManagement(core_queue_ref, gui_queue_ref,
                                            gui_id_ref, self.TabManager,
                                            self.get_gui_config,
                                            self.set_gui_config)

        self.TabManager.gui_api = self.gui_management.gui_api
        self.TabManager.dGui = self.gui_management.gui_data

        self.gui_management.gui_event_processing.added_dplugin.connect(
            self.add_dplugin)
        self.gui_management.gui_event_processing.removed_dplugin.connect(
            self.remove_dplugin)
        self.gui_management.gui_event_processing.dgui_changed.connect(
            self.triggered_changed_dgui)
        self.gui_management.gui_event_processing.plugin_died.connect(
            self.triggered_plugin_died)

        self.gui_management.gui_api.error_occured.connect(
            self.triggered_error_occurred)

        signal.signal(signal.SIGINT, lambda a, b: self.signal_handler())

        # List for keys that are active
        self.keysActiveList = []

        # -------------------------------------
        # Create placeholder
        # -------------------------------------
        self.overview_menu = None
        self.create_plugin_menu = None
        self.plugin_create_dialog = None

        self.log = None
        self.last_config = None
        self.in_run_mode = None
        self.workingTimer = None

        # initialize the graphic of the gui
        # -------------------------------------------------- #
        self.init_gui_graphic()
Example #15
                            records_read=len(incoming_filenames),
                            records_written=len(local_filenames),
                            bad_records=1)
                        download_stats.save()
                        return 2
            downloaded_bytes = sum(
                [os.path.getsize(f) for f in local_filenames])
            download_stats.increment(records_read=len(incoming_filenames),
                                     records_written=len(local_filenames),
                                     bytes_read=downloaded_bytes,
                                     bytes_written=downloaded_bytes)
            logger.log(download_stats.get_summary())
            download_stats.save()
            after_download = now()

            raw_files = Queue()
            for l in local_filenames:
                raw_files.put(l)

            completed_files = Queue()

            # Begin reading raw input
            raw_readers = start_workers(
                logger, num_cpus, "Reader", ReadRawStep, raw_files,
                (completed_files, args.log_file, args.stats_file, schema,
                 converter, storage, args.bad_data_log))

            # Tell readers to stop when they get to the end:
            finish_queue(raw_files, num_cpus)

            # Compress completed files.
Example #16
    lines = []
    for path in paths:
        lines.extend(l.strip() for l in path.open())
    query = query_q.get()
    while query:
        results_q.put([l for l in lines if query in l])
        query = query_q.get()


from multiprocessing import Process, Queue, cpu_count
from path import path
cpus = cpu_count()

pathnames = [f for f in path('.').listdir() if f.isfile()]
paths = [pathnames[i::cpus] for i in range(cpus)]
query_queues = [Queue() for p in range(cpus)]
results_queue = Queue()
'''
subclass process
'''

# Although the simplest way to start a job in a separate process is to use Process and pass a target function, it is also possible to use a custom subclass.

import multiprocessing


class Worker(multiprocessing.Process):
    def run(self):
        print('In {}'.format(self.name))
        return
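
# A brief usage sketch for the subclass above: instantiate Worker objects, then
# start() and join() them just like a Process constructed with a target function.
if __name__ == '__main__':
    jobs = [Worker() for _ in range(3)]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()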
Example #17
edges = np.vstack({tuple(sorted(row)) for row in tieline[:, 2:4]}) if tieline.any() else np.array([])
pipes = {}
for edge in edges.tolist():
    fend, tend = Pipe()
    if edge[0] not in pipes:
        pipes[edge[0]] = {}
    pipes[edge[0]][edge[1]] = fend
    if edge[1] not in pipes:
        pipes[edge[1]] = {}
    pipes[edge[1]][edge[0]] = tend

##----subproblem configuration including local opf and communication pipes-----
problem = []
output = Queue()
for i in range(na):
    s = opf_admm_model()
    s.config(i + 1, op, bus, gen, gencost, Ybus, genBus, tieline, pipes, na)
    s.var_init()
    problem.append(s)

##----- run each worker in parallel ---------
procs = []
for i in range(na):
    procs += [Process(target=runWorker, args=(i + 1, problem[i], output))]

start_time = time.time()
start_clock = time.clock()
for proc in procs:
    proc.start()
## ===


Example #18
def main():
    bt = Thread(target=background)
    bt.start()
    # This is NOT a good socket location for production, it is for
    # playing around.  I suggest /var/run/milter/myappnamesock for production.
    #  socketname = os.path.expanduser('~/pythonsock')
    socketname = "inet:8800"
    timeout = 600
    # Register to have the Milter factory create instances of your class:
    Milter.factory = myMilter
    flags = Milter.CHGBODY + Milter.CHGHDRS + Milter.ADDHDRS
    flags += Milter.ADDRCPT
    flags += Milter.DELRCPT
    Milter.set_flags(flags)  # tell Sendmail which features we use
    print("%s milter startup" % time.strftime('%Y%b%d %H:%M:%S'))
    sys.stdout.flush()
    Milter.runmilter("pythonfilter", socketname, timeout)
    logq.put(None)
    bt.join()
    print("%s bms milter shutdown" % time.strftime('%Y%b%d %H:%M:%S'))


if __name__ == "__main__":
    # You probably do not need a logging process, but if you do, this
    # is one way to do it.
    logq = Queue(maxsize=4)
    main()
Example #19
    def search_single_record(self,
                             rec,
                             n_parallel_words=1,
                             word_limit=None,
                             process_timeout=None,
                             maximum_matches=1000,
                             filter=None):
        if n_parallel_words is None:
            n_parallel_words = cpu_count()

        if word_limit is None:
            word_limit = self.N

        initial_q = managerQueue.Queue()

        [
            initial_q.put({field_name: rec[field_name]})
            for field_name in self.index_names[:word_limit]
        ]

        # enqueue a sentinel value so we know we have reached the end of the queue
        initial_q.put('STOP')
        queue_empty = False

        # create an empty queue for results
        results_q = Queue()

        # create a set of unique results, using MongoDB _id field
        unique_results = set()

        l = list()

        while True:

            # build children processes, taking cursors from in_process queue first, then initial queue
            p = list()
            while len(p) < n_parallel_words:
                word_pair = initial_q.get()
                if word_pair == 'STOP':
                    # if we reach the sentinel value, set the flag and stop queuing processes
                    queue_empty = True
                    break
                if not initial_q.empty():
                    p.append(
                        Process(target=get_next_match,
                                args=(results_q, word_pair, self.collection,
                                      np.array(rec['signature']),
                                      self.distance_cutoff, maximum_matches)))

            if len(p) > 0:
                for process in p:
                    process.start()
            else:
                break

            # collect results, taking care not to return the same result twice

            num_processes = len(p)

            while num_processes:
                results = results_q.get()
                if results == 'STOP':
                    num_processes -= 1
                else:
                    for key in results.keys():
                        if key not in unique_results:
                            unique_results.add(key)
                            l.append(results[key])

            for process in p:
                process.join()

            # yield a set of results
            if queue_empty:
                break

        return l
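
# The queue-plus-sentinel pattern above, distilled into a minimal standalone sketch.
# The names (task_q, result_q, worker) and the 'STOP' marker are illustrative only.
from multiprocessing import Process, Queue

def worker(task_q, result_q):
    while True:
        task = task_q.get()
        if task == 'STOP':        # sentinel: no more work for this worker
            result_q.put('STOP')  # propagate so the consumer knows this worker finished
            break
        result_q.put(task * 2)

if __name__ == '__main__':
    task_q, result_q = Queue(), Queue()
    workers = [Process(target=worker, args=(task_q, result_q)) for _ in range(2)]
    for w in workers:
        w.start()
    for task in [1, 2, 3]:
        task_q.put(task)
    for _ in workers:
        task_q.put('STOP')      # one sentinel per worker
    remaining, results = len(workers), []
    while remaining:
        item = result_q.get()
        if item == 'STOP':
            remaining -= 1
        else:
            results.append(item)
    for w in workers:
        w.join()
    print(sorted(results))      # [2, 4, 6]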
Example #20
File: kgtkwriter.py Project: yyht/kgtk
    def _setup(cls,
               column_names: typing.List[str],
               file_path: typing.Optional[Path],
               who: str,
               file_out: typing.TextIO,
               require_all_columns: bool,
               prohibit_extra_columns: bool,
               fill_missing_columns: bool,
               error_file: typing.TextIO,
               header_error_action: ValidationAction,
               gzip_in_parallel: bool,
               gzip_queue_size: int,
               column_separator: str,
               mode: Mode = Mode.AUTO,
               output_format: typing.Optional[str] = None,
               output_column_names: typing.Optional[typing.List[str]] = None,
               old_column_names: typing.Optional[typing.List[str]] = None,
               new_column_names: typing.Optional[typing.List[str]] = None,
               verbose: bool = False,
               very_verbose: bool = False,
    )->"KgtkWriter":

        if output_format is None:
            output_format = cls.OUTPUT_FORMAT_DEFAULT
            if verbose:
                print("Defaulting the output format to %s" % output_format, file=error_file, flush=True)

        if output_format == cls.OUTPUT_FORMAT_CSV:
            column_separator = "," # What a cheat!
                
        if output_column_names is None:
            output_column_names = column_names
        else:
            # Rename all output columns.
            if len(output_column_names) != len(column_names):
                raise ValueError("%s: %d column names but %d output column names" % (who, len(column_names), len(output_column_names)))

        if old_column_names is not None or new_column_names is not None:
            # Rename selected output columns:
            if old_column_names is None or new_column_names is None:
                raise ValueError("%s: old/new column name mismatch" % who)
            if len(old_column_names) != len(new_column_names):
                raise ValueError("%s: old/new column name length mismatch: %d != %d" % (who, len(old_column_names), len(new_column_names)))

            # Rename columns in place.  Start by copying the output column name
            # list so the changes don't inadvertently propagate.
            output_column_names = output_column_names.copy()
            column_name: str
            idx: int
            for idx, column_name in enumerate(old_column_names):
                if column_name not in output_column_names:
                    raise ValueError("%s: old column names %s not in the output column names." % (who, column_name))
                output_column_names[output_column_names.index(column_name)] = new_column_names[idx]
                

        # Build a map from column name to column index.  This is used for
        # self.writemap(...)  and self.build_shuffle_list(...)
        column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
                                                                              header_line=column_separator.join(column_names),
                                                                              who=who,
                                                                              error_action=header_error_action,
                                                                              error_file=error_file)

        # Build a header line for error feedback:
        header: str = column_separator.join(output_column_names)

        # Build a map from output column name to column index.
        output_column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(output_column_names,
                                                                                     header_line=header,
                                                                                     who=who,
                                                                                     error_action=header_error_action,
                                                                                     error_file=error_file)

        # Should we automatically determine if this is an edge file or a node file?
        is_edge_file: bool = False
        is_node_file: bool = False
        if mode is KgtkWriter.Mode.AUTO:
            # If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
            node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES, output_column_name_map,
                                                header_line=header,
                                                who=who,
                                                error_action=header_error_action,
                                                error_file=error_file,
                                                is_optional=True)
            is_edge_file = node1_idx >= 0
            is_node_file = not is_edge_file
        elif mode is KgtkWriter.Mode.EDGE:
            is_edge_file = True
        elif mode is KgtkWriter.Mode.NODE:
            is_node_file = True
        elif mode is KgtkWriter.Mode.NONE:
            pass
        
        # Validate that we have the proper columns for an edge or node file,
        # ignoring the result.
        cls.get_special_columns(output_column_name_map,
                                header_line=header,
                                who=who,
                                error_action=header_error_action,
                                error_file=error_file,
                                is_edge_file=is_edge_file,
                                is_node_file=is_node_file)

        gzip_thread: typing.Optional[GzipProcess] = None
        if gzip_in_parallel:
            if verbose:
                print("Starting the gzip process.", file=error_file, flush=True)
            gzip_thread = GzipProcess(file_out, Queue(gzip_queue_size))
            gzip_thread.start()

        kw: KgtkWriter = cls(file_path=file_path,
                             file_out=file_out,
                             column_separator=column_separator,
                             column_names=column_names,
                             column_name_map=column_name_map,
                             column_count=len(column_names),
                             require_all_columns=require_all_columns,
                             prohibit_extra_columns=prohibit_extra_columns,
                             fill_missing_columns=fill_missing_columns,
                             error_file=error_file,
                             header_error_action=header_error_action,
                             gzip_in_parallel=gzip_in_parallel,
                             gzip_thread=gzip_thread,
                             gzip_queue_size=gzip_queue_size,
                             output_format=output_format,
                             output_column_names=output_column_names,
                             line_count=1,
                             verbose=verbose,
                             very_verbose=very_verbose,
        )
        kw.write_header()
        return kw
Example #21
def main():
    global parent_pid
    parent_pid = os.getpid()
    args_info = ArgsProcessing(sys.argv[1:])  # process command-line arguments
    work_mode = args_info.current_mode  # working mode
    log_file_list = args_info.log_file_list  # all log files
    signal.signal(signal.SIGINT, signal_handler)  # SIGINT is the signal sent by Ctrl+C (value 2)
    start_time = time.time()
    # Mode 1: analyze a single transaction; Mode 2: all transactions; Mode 3: a single block; Mode 4: all blocks
    if work_mode == 1:
        tx_hash = args_info.tx_hash  # transaction hash
        # fetch all transaction data as dictionaries
        all_tx_dict_list = get_all_log_dict(log_file_list, 'transaction')
        overall_earliest_msg, overall_latest_msg = retrieve_earliest_latest_msg(
            all_tx_dict_list, tx_hash)
        if overall_earliest_msg and overall_latest_msg:
            print('最早: %s' % overall_earliest_msg)
            print('最晚: %s' % overall_latest_msg)
            interval_time = millisecond2time_format(
                calc_millisecond_interval(overall_latest_msg[0],
                                          overall_earliest_msg[0]))
            print('间隔: %s' % interval_time)
        else:
            print('The transaction %s was not found in log file!' % tx_hash)
    elif work_mode == 2:
        all_tx_dict_list = get_all_log_dict(log_file_list, 'transaction')
        all_tx_hash = []  # all transaction hashes
        for tx_dict in all_tx_dict_list:
            all_tx_hash.extend(list(tx_dict.keys()))
        # remove duplicate elements
        all_tx_hash = list(set(all_tx_hash))
        broadcasting_time_queue = Queue()  # Queue storing broadcast times
        processes = []
        # get the number of CPU cores
        processor_num = cpu_count()
        # split all transaction hashes into roughly equal chunks, one per CPU core
        split_all_tx_hash = split_list(all_tx_hash, processor_num)
        for work_list in split_all_tx_hash:
            # create one child process per chunk (one per CPU core) to compute broadcast times
            p = Process(target=calc_broadcasting_time,
                        args=(work_list, broadcasting_time_queue,
                              all_tx_dict_list))
            p.start()
            processes.append(p)
        for process in processes:
            # wait for all child processes to finish
            process.join()
        broadcasting_time_list = []
        while True:
            # collect each child's results into a list in the parent process
            broadcasting_time_list.append(broadcasting_time_queue.get())
            if broadcasting_time_queue.empty():
                break
        # merge the sorted lists produced by the children using a min-heap
        broadcasting_time_list = list(heapq.merge(*broadcasting_time_list))
        # compute the average and median of the broadcast times
        average, median = get_average_median(broadcasting_time_list)
        print('Shortest time: %s' % millisecond2time_format(broadcasting_time_list[0]))
        print('Longest time:  %s' % millisecond2time_format(broadcasting_time_list[-1]))
        print('Average:       %s' % millisecond2time_format(average))
        print('Median:        %s' % millisecond2time_format(median))
    elif work_mode == 3:
        height = args_info.height  # block height
        all_block_dict_list = get_all_log_dict(log_file_list, 'block')
        overall_earliest_msg, overall_latest_msg = retrieve_earliest_latest_msg(
            all_block_dict_list, height)
        if overall_earliest_msg and overall_latest_msg:
            print('最早: %s' % overall_earliest_msg)
            print('最晚: %s' % overall_latest_msg)
            interval_time = millisecond2time_format(
                calc_millisecond_interval(overall_latest_msg[0],
                                          overall_earliest_msg[0]))
            print('间隔: %s' % interval_time)
        else:
            print('The block height %s was not found in log file!' % height)
    elif work_mode == 4:
        all_block_dict_list = get_all_log_dict(log_file_list, 'block')
        all_block_height = []
        for block_dict in all_block_dict_list:
            all_block_height.extend(list(block_dict.keys()))
        all_block_height = list(set(all_block_height))
        broadcasting_time_queue = Queue()
        processes = []
        processor_num = cpu_count()
        split_all_block_height = split_list(all_block_height, processor_num)
        for work_list in split_all_block_height:
            p = Process(target=calc_broadcasting_time,
                        args=(work_list, broadcasting_time_queue,
                              all_block_dict_list))
            p.start()
            processes.append(p)
        for process in processes:
            process.join()
        broadcasting_time_list = []
        while True:
            broadcasting_time_list.append(broadcasting_time_queue.get())
            if broadcasting_time_queue.empty():
                break
        broadcasting_time_list = list(heapq.merge(*broadcasting_time_list))
        average, median = get_average_median(broadcasting_time_list)
        print('最短时间: %s' % millisecond2time_format(broadcasting_time_list[0]))
        print('最长时间: %s' % millisecond2time_format(broadcasting_time_list[-1]))
        print('平均值:   %s' % millisecond2time_format(average))
        print('中位数:   %s' % millisecond2time_format(median))

    print('分析用时:', time.time() - start_time)
Example #22
def main():
    """RTNM main function used for getting the users arguements and spawns processes for each
    connection and handles dispatching of responses into a worker pool for processing
    and uploading of the data

    """
    parser = ArgumentParser()
    parser.add_argument("-c",
                        "--config",
                        dest="config",
                        help="Location of the configuration file",
                        required=True)
    parser.add_argument("-b",
                        "--batch-size",
                        dest="batch_size",
                        type=int,
                        help="Batch size of the upload to ElasticSearch",
                        required=True)
    parser.add_argument(
        "-w",
        "--worker-pool-size",
        dest="worker_pool_size",
        type=int,
        help="Number of workers in the worker pool used for uploading")
    parser.add_argument("-v",
                        "--verbose",
                        dest="debug",
                        help="Enable debugging",
                        action="store_true")
    parser.add_argument("-r",
                        "--retry",
                        dest="retry",
                        help="Enable retrying",
                        action="store_true")
    args = parser.parse_args()
    try:
        if Path(args.config).is_file():
            inputs, outputs = generate_clients(args.config)
        else:
            raise IOError(f"File {args.config} doesn't exist")
    except ConfigError as error:
        parser.error(f"{error}")
    except KeyError as error:
        parser.error(
            f"Error in the configuration file: No key for {error}.\nCan't parse the config file"
        )
    except Exception as error:
        parser.error(f"{error}")

    path: Path = Path().absolute() / "logs"
    log_queue: Queue = Queue()
    log_name: str = f"rtnm-{args.config.strip('ini').strip('.').split('/')[-1]}"
    rtnm_log = init_logs(log_name, path, log_queue, args.debug)
    try:
        client_conns: List[Union[DialInClient, TLSDialInClient]] = []
        data_queue: Queue = Queue()
        rtnm_log.logger.info("Starting inputs and outputs")
        for client in inputs:
            if inputs[client]["dial"] == "in":
                inputs[client]["debug"] = args.debug
                inputs[client]["retry"] = args.retry
                if "pem-file" in inputs[client]:
                    with open(inputs[client]["pem-file"], "rb") as file_desc:
                        pem = file_desc.read()
                    rtnm_log.logger.info(
                        f"Creating TLS Connector for {client}")
                    client_conns.append(
                        TLSDialInClient(pem,
                                        data_queue,
                                        log_name,
                                        **inputs[client],
                                        name=client))
                else:
                    rtnm_log.logger.info(f"Creating Connector for {client}")
                    client_conns.append(
                        DialInClient(data_queue,
                                     log_name,
                                     **inputs[client],
                                     name=client))
            else:
                client_conns.append(
                    DialOutClient(data_queue, log_name, inputs[client],
                                  client))
        for client in client_conns:
            client.start()
        batch_list: List[Tuple[str, str, Optional[str], Optional[str],
                               str]] = []
        with Pool(processes=args.worker_pool_size) as worker_pool:
            while all([client.is_alive() for client in client_conns]):
                try:
                    data: Tuple[str, str, Optional[str], Optional[str],
                                str] = data_queue.get(timeout=10)
                    if data is not None:
                        batch_list.append(data)
                        if len(batch_list) >= args.batch_size:
                            rtnm_log.logger.debug("Uploading full batch size")
                            rtnm_log.logger.debug(batch_list)
                            result = worker_pool.apply_async(
                                process_and_upload_data,
                                args=[*batch_list, log_name, outputs])
                            batch_list.clear()
                except Empty:
                    if len(batch_list) != 0:
                        rtnm_log.logger.debug(
                            f"Uploading data of length {len(batch_list)}")
                        result = worker_pool.apply_async(
                            process_and_upload_data,
                            args=[*batch_list, log_name, outputs])
                        batch_list.clear()
                except Exception as error:
                    rtnm_log.logger.error(error)
                    rtnm_log.logger.error(
                        "Error during worker pool, going to cleanup")
                    for client in client_conns:
                        client.terminate()
    except Exception as error:
        rtnm_log.logger.error(error)
    except KeyboardInterrupt as error:
        rtnm_log.logger.error("Shutting down due to user ctrl-c")
    finally:
        rtnm_log.logger.info("In cleanup")
        rtnm_log.queue.put(None)
        for client in client_conns:
            client.terminate()
Example #23
        if j == 0 and len(t2) != 0:
            for k in range(0, len(t2)):
                print("////////// Home", t2[k][0], "gives", round(t2[k][1], 3),
                      "kW to home", t2[k][2], "//////////")
            print((" "))


if __name__ == "__main__":
    ti = time.time()

    lockQueue = Lock()
    lockCount = Lock()
    lockWrite = Lock()

    queue = Queue()
    q = Queue()
    queue_echange = Queue()

    clock_ok = Event()
    weather_ok = Event()

    count = Value('i', 0)
    temp = Array('f', range(2))
    wind = Array('f', range(2))
    market_OK = Value('b', False)

    term_conn, markt_conn, = Pipe()
    term_conn2, weather_conn = Pipe()

    Homes = []
Example #24
import cv2
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
import detect_face
import random
from time import sleep
import face_preprocess
from multiprocessing import Process, Queue
import time

q1 = Queue()
mtcnn_modelfile = "/home/ubuntu/Additive/cbir/model_check_points"
gpu_memory_fraction = 0.2


def main(q1):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, mtcnn_modelfile)

    minsize = 20
    threshold = [0.6, 0.7, 0.7]
Example #25
from camera import camera
import utils
from utils import CommonLogger, init_logger


LOG_FILENAME = '/home/pi/logs/navi.log'
logger = init_logger(logging.getLogger(__name__), LOG_FILENAME)
sys.stdout = CommonLogger(logger, logging.INFO)
sys.stderr = CommonLogger(logger, logging.ERROR)

STEP_LENGTH = 40.0
ANGLE_THRESHOLD = 10
FOOT_SENSOR_ID = 0
BACK_SENSOR_ID = 1
GPIO_OVERRIDE_PIN = 17
QUEUE = Queue()


class Navigator(object):

    def __init__(self, logger):
        self.log = logger
        self.log.info('Starting navigator...')
        self.db = DB(logger=logger)
        self.maps = MapsRepo()
        self.audio = AudioDriver()
        self.sc = StepCounter(logger)
        self.hc = HeadingCalculator(logger)
        self.cam = Process(target=camera, name="Camera", args=(QUEUE,))
        self.current_prompt = None
        self.navi_chunk_finished = False
Example #26
    def __init__(self):

        # Class name
        self.__name = 'bfx_ws'
        print(self.__name + ' thread - initializing ... ', end='')

        # Internal class variables
        self.__key = bfx_api_pkey
        self.__skey = bfx_api_skey

        # Internal status variables
        self._isActive = False

        # Internal class events
        self._connected = Event()
        self._disconnected = Event()
        self._pause = Event()

        # Channel mappings
        self._channel_ids = {}

        # Event handlers
        self._event_handlers = {
            'info': self.__handle_event_info,
            'auth': self.__handle_event_auth,
            'subscribed': self.__handle_event_subscribed
        }

        # Data handlers
        self._data_handlers = {
            'account': self.__process_data_account,
            'ticker': self.__process_data_ticker,
            'trades': self.__process_data_trades
        }

        # Data handlers
        self._data_account_handlers = {
            'ps': self.__handle_data_account_ps,
            'ws': self.__handle_data_account_ws,
            'os': self.__handle_data_account_os,
            'fcs': self.__handle_data_account_fcs,
            'fls': self.__handle_data_account_fls,
            'fos': self.__handle_data_account_fos
        }

        # Data queues
        self.data_queue = Queue()

        # Data Grids
        self.account_orders = {}
        self.account_funding_positions = {}

        # Websocket specific variables
        self.ws_version = None
        self.ws_userid = None

        # Establish as new independent thread
        Thread.__init__(self)
        self.daemon = True
        print('done.')

        # Create a logging object to store all incoming messages from the websocket
        self.store_raw = False
        if self.store_raw:
            self.data_raw_log = logging_handler.setup_logger(
                self.__name, 'raw_data.log')
Example #27
    def _init_queues(self):
        """
        Sets up shared queues for inter-process communication.
        """
        self._input_queue = Queue()
        self._output_queue = Queue()
Example #28
#   Created by Elshad Karimov on 31/05/2020.
#   Copyright © 2020 AppMillers. All rights reserved.

# How to use multiprocessing.Queue as a FIFO queue:

from multiprocessing import Queue

customQueue = Queue(maxsize=3)
customQueue.put(1)
print(customQueue.get())
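
# A slightly fuller sketch of the FIFO behaviour: items come back in the order they were put.
fifo = Queue(maxsize=3)
for item in ('a', 'b', 'c'):
    fifo.put(item)
print(fifo.get())  # 'a'
print(fifo.get())  # 'b'
print(fifo.get())  # 'c'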
Example #29
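# The imports and a minimal writer sketch; the original write() function is not part of
# this excerpt, so the version below is an assumed counterpart added for illustration.
from multiprocessing import Process, Queue
import time

def write(q):
    for value in ['A', 'B', 'C']:
        print 'put %s to queue.' % value
        q.put(value)
        time.sleep(1)
    q.put('Done')  # sentinel telling the reader to stop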

# Code executed by the reader process
def read(q):
    while True:
        value = q.get(True)
        if value == 'Done':  # stop on the sentinel instead of consuming a second item
            break
        print 'read %s from queue.' % value


if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child process
    #Queue(maxsize=0)
    #    Returns a queue object
    q = Queue()

    #__init__(self, group=None, target=None, name=None, args=(), kwargs={})
    pw = Process(target=write, args=(q, ))
    pr = Process(target=read, args=(q, ))

    # Start child process pw (writer)
    pw.start()

    # Start child process pr (reader)
    pr.start()

    # Wait until child process pw terminates
    pw.join()
Example #30
File: main.py Project: xiaqinga/test
from automator import Automator
from prop import END, RUN

from flusher import flush
from multiprocessing import Process, Queue

KEYBOARD = Queue()


def main(kb):
    # Connect to adb.
    instance = Automator('127.0.0.1:7555', kb)

    # Start the script.
    instance.start()


if __name__ == '__main__':
    p = Process(target=main, args=(KEYBOARD, ))
    p.start()
    while True:
        flush()
        txt = input()
        if txt == END or txt.split(' ')[0] == RUN:
            KEYBOARD.put(txt)
            if txt == END:
                break
        else:
            KEYBOARD.put('')
    p.join()