Exemple #1
0
def setUpModule():
    """Start the shared HTTP/HTTPS test server once for this module."""
    global test_server, model, host, port, ssl_port

    host = '127.0.0.1'
    utils.patch_auth()
    port = utils.get_free_port('http')
    ssl_port = utils.get_free_port('https')
    test_server = utils.run_server(
        host, port, ssl_port, test_mode=True)
Exemple #2
0
def setup_server(environment='development'):
    """Launch the test server for *environment*, exporting its
    coordinates through module-level globals."""
    global test_server, model, host, port, ssl_port

    patch_auth()
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(
        host, port, ssl_port,
        test_mode=True,
        environment=environment,
    )
def setUpModule():
    """Start a MockModel-backed test server shared by this module's tests."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(
        host, port, ssl_port, test_mode=True, model=model)
    def test_vm_livemigrate_persistent_API(self):
        """Live-migrate a persistent VM through the REST API and verify it
        arrives (and stays defined) on the remote libvirt host.

        NOTE(review): uses Python 2 syntax (`except Exception, e`) — this
        test predates Python 3.
        """
        patch_auth()

        inst = model.Model(libvirt_uri='qemu:///system',
                           objstore_loc=self.tmp_store)

        host = '127.0.0.1'
        port = get_free_port('http')
        ssl_port = get_free_port('https')
        cherrypy_port = get_free_port('cherrypy_port')

        # RollbackContext runs the deferred cleanups in reverse order even
        # when an assertion fails mid-test.
        with RollbackContext() as rollback:
            test_server = run_server(host, port, ssl_port, test_mode=True,
                                     cherrypy_port=cherrypy_port, model=inst)
            rollback.prependDefer(test_server.stop)

            self.request = partial(request, host, ssl_port)

            self.create_vm_test()
            rollback.prependDefer(utils.rollback_wrapper, self.inst.vm_delete,
                                  u'test_vm_migrate')

            # removing cdrom because it is not shared storage and will make
            # the migration fail
            dev_list = self.inst.vmstorages_get_list('test_vm_migrate')
            self.inst.vmstorage_delete('test_vm_migrate',  dev_list[0])

            try:
                self.inst.vm_start('test_vm_migrate')
            except Exception, e:
                self.fail('Failed to start the vm, reason: %s' % e.message)

            # Kick off the migration; the API answers 202 with an async task.
            migrate_url = "/plugins/kimchi/vms/%s/migrate" % 'test_vm_migrate'

            req = json.dumps({'remote_host': KIMCHI_LIVE_MIGRATION_TEST,
                             'user': '******'})
            resp = self.request(migrate_url, req, 'POST')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            # Wait for the task to finish, then re-fetch it to check its
            # final status.
            wait_task(self._task_lookup, task['id'])
            task = json.loads(
                self.request(
                    '/plugins/kimchi/tasks/%s' % task['id'],
                    '{}'
                ).read()
            )
            self.assertEquals('finished', task['status'])

            try:
                # Confirm the guest now exists on the remote libvirt as a
                # persistent domain, then clean it up there.
                remote_conn = self.get_remote_conn()
                rollback.prependDefer(remote_conn.close)
                remote_vm = remote_conn.lookupByName('test_vm_migrate')
                self.assertTrue(remote_vm.isPersistent())
                remote_vm.destroy()
                remote_vm.undefine()
            except Exception, e:
                self.fail('Migration test failed: %s' % e.message)
Exemple #5
0
def setUpModule():
    """Start the module-wide test server backed by a mock model."""
    global test_server, model, host, port, ssl_port

    utils.patch_auth()
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = utils.get_free_port('http')
    ssl_port = utils.get_free_port('https')
    test_server = utils.run_server(
        host, port, ssl_port, test_mode=True, model=model)
Exemple #6
0
def setup_server(environment='development'):
    """Launch a mock-model test server for the given *environment*."""
    global test_server, model, host, port, ssl_port

    patch_auth()
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(
        host, port, ssl_port,
        test_mode=True,
        model=model,
        environment=environment,
    )
def setUpModule():
    """Start the shared test server (with a cherrypy port) for this module."""
    global test_server, model, host, port, ssl_port, cherrypy_port

    patch_auth()
    model = Model(None, "/tmp/obj-store-test")
    host = "127.0.0.1"
    port = get_free_port("http")
    ssl_port = get_free_port("https")
    cherrypy_port = get_free_port("cherrypy_port")
    test_server = run_server(host, port, ssl_port,
                             test_mode=True,
                             cherrypy_port=cherrypy_port,
                             model=model)
 def setUp(self):
     """Per-test fixture: JSON Accept header, fresh mock model, live server."""
     global host, port, ssl_port, model, test_server
     cherrypy.request.headers = {'Accept': 'application/json'}
     model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
     patch_auth()
     host = '127.0.0.1'
     port = get_free_port('http')
     ssl_port = get_free_port('https')
     test_server = run_server(
         host, port, ssl_port, test_mode=True, model=model)
Exemple #9
0
def setUpModule():
    """Start the shared test server and create the empty fake ISO fixture."""
    global host, port, ssl_port, model, test_server, fake_iso
    cherrypy.request.headers = {'Accept': 'application/json'}
    model = mockmodel.MockModel('/tmp/obj-store-test')
    patch_auth()
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(
        host, port, ssl_port, test_mode=True, model=model)
    # Touch an empty placeholder file for ISO-related tests.
    fake_iso = '/tmp/fake.iso'
    with open(fake_iso, 'w'):
        pass
Exemple #10
0
def setUpModule():
    """Bring up the module-wide Model-backed test server."""
    global test_server, model, host, port, ssl_port, cherrypy_port

    patch_auth()
    model = Model(None, '/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    cherrypy_port = get_free_port('cherrypy_port')
    test_server = run_server(
        host, port, ssl_port,
        test_mode=True, cherrypy_port=cherrypy_port, model=model)
Exemple #11
0
def setUpModule():
    """Start the shared test server and lay down the fake ISO file."""
    global host, port, ssl_port, model, test_server, fake_iso
    cherrypy.request.headers = {"Accept": "application/json"}
    model = mockmodel.MockModel("/tmp/obj-store-test")
    patch_auth()
    host = "127.0.0.1"
    port = get_free_port("http")
    ssl_port = get_free_port("https")
    test_server = run_server(
        host, port, ssl_port, test_mode=True, model=model)
    # Create an empty file standing in for a real ISO image.
    fake_iso = "/tmp/fake.iso"
    with open(fake_iso, "w"):
        pass
def setUpModule():
    """Start the Model-backed test server shared by this module."""
    global test_server, model, host, port, ssl_port, cherrypy_port

    patch_auth()
    model = Model(None, '/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    cherrypy_port = get_free_port('cherrypy_port')
    test_server = run_server(host, port, ssl_port,
                             test_mode=True,
                             cherrypy_port=cherrypy_port,
                             model=model)
Exemple #13
0
def setUpModule():
    """Start a test server (no explicit model) with a temp-file object store."""
    global test_server, model, host, port, ssl_port, cherrypy_port, tmpfile

    utils.patch_auth()
    tmpfile = tempfile.mktemp()
    host = '127.0.0.1'
    port = utils.get_free_port('http')
    ssl_port = utils.get_free_port('https')
    cherrypy_port = utils.get_free_port('cherrypy_port')
    test_server = utils.run_server(
        host, port, ssl_port,
        test_mode=True, cherrypy_port=cherrypy_port)
Exemple #14
0
def setUpModule():
    """Start the MockModel-backed test server once for this module.

    Publishes the server coordinates through module globals so the tests
    (and tearDownModule) can reach them.
    """
    # Fix: `port` and `cherrypy_port` were missing from the global
    # declaration, so the assignments below only created function-local
    # names and the module-level values were never set (the sibling
    # setUpModule variants all declare them).
    global test_server, model, host, port, ssl_port, cherrypy_port, tmpfile

    patch_auth()
    tmpfile = tempfile.mktemp()
    model = MockModel(tmpfile)
    host = "127.0.0.1"
    port = get_free_port("http")
    ssl_port = get_free_port("https")
    cherrypy_port = get_free_port("cherrypy_port")
    test_server = run_server(host, port, ssl_port, test_mode=True,
                             cherrypy_port=cherrypy_port, model=model)
Exemple #15
0
def setup_server(environment='development'):
    """Launch the test server for the given *environment*."""
    global test_server, model, host, port, ssl_port

    patch_auth()
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(host, port, ssl_port, test_mode=True,
                             environment=environment)
Exemple #16
0
def setUpModule():
    """Start the shared test server and create an empty fake ISO."""
    global host, port, ssl_port, model, test_server, fake_iso
    cherrypy.request.headers = {'Accept': 'application/json'}
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    patch_auth()
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(host, port, ssl_port,
                             test_mode=True, model=model)
    # Empty placeholder file used by ISO-related tests.
    fake_iso = '/tmp/fake.iso'
    with open(fake_iso, 'w'):
        pass
Exemple #17
0
def setUpModule():
    """Start a mock-model test server (no auth patching in this variant)."""
    global test_server, model, host, port, ssl_port

    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = utils.get_free_port('http')
    ssl_port = utils.get_free_port('https')
    test_server = utils.run_server(host, port, ssl_port, test_mode=True,
                                   model=model)
def setUpModule():
    """Start the test server (non-sudo auth) and build the fake ISO."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = kimchi.mockmodel.MockModel("/tmp/obj-store-test")
    host = "127.0.0.1"
    port = get_free_port("http")
    ssl_port = get_free_port("https")
    test_server = run_server(host, port, ssl_port,
                             test_mode=True, model=model)

    # Create fake ISO to do the tests
    construct_fake_iso(fake_iso, True, "12.04", "ubuntu")
Exemple #19
0
def setUpModule():
    """Start the non-sudo test server and generate the fake Ubuntu ISO."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(host, port, ssl_port,
                             test_mode=True, model=model)

    # Create fake ISO to do the tests
    construct_fake_iso(fake_iso, True, '12.04', 'ubuntu')
Exemple #20
0
def setUpModule():
    """Start the mock-model test server backed by a temp object store."""
    global test_server, model, host, port, ssl_port, cherrypy_port, tmpfile

    utils.patch_auth()
    tmpfile = tempfile.mktemp()
    model = mockmodel.MockModel(tmpfile)
    host = '127.0.0.1'
    port = utils.get_free_port('http')
    ssl_port = utils.get_free_port('https')
    cherrypy_port = utils.get_free_port('cherrypy_port')
    test_server = utils.run_server(
        host, port, ssl_port,
        test_mode=True, cherrypy_port=cherrypy_port, model=model)
Exemple #21
0
def setUpModule():
    """Start the non-sudo mock-model server and build the fake ISO."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(host, port, ssl_port,
                             test_mode=True, model=model)

    # Create fake ISO to do the tests
    construct_fake_iso(fake_iso, True, '12.04', 'ubuntu')
Exemple #22
0
 def setUp(self):
     """Per-test setup: JSON headers, fresh mock model, running server."""
     global host, port, ssl_port, model, test_server
     cherrypy.request.headers = {'Accept': 'application/json'}
     model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
     patch_auth()
     host = '127.0.0.1'
     port = get_free_port('http')
     ssl_port = get_free_port('https')
     test_server = run_server(host, port, ssl_port, test_mode=True,
                              model=model)
Exemple #23
0
def setup_server(environment='development'):
    """Start a mock-model test server for *environment*."""
    global test_server, model, host, port, ssl_port

    patch_auth()
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    test_server = run_server(host, port, ssl_port, test_mode=True,
                             model=model, environment=environment)
Exemple #24
0
    def create_pyro_proxy(self):
        """Create proxy to make connections to BigBrother.

        Gets the network address of the network interface indicated in
        json file.
        Obtain a connection port from the port indicated in json file.

        Next, it creates a daemon that is the connection proxy
        to BigBrother and registers itself.

        It is working in the background listening to requests.
        """
        try:
            # Resolve our own address from the configured interface, then a
            # usable port starting from the configured base port.
            myip = utils.get_ip_address(ifname=self.config["interface"])
            myport = utils.get_free_port(self.config["proxy_port"], ip=myip)

            daemon = Pyro4.Daemon(host=myip, port=myport)
            # Shared-secret HMAC: only clients that know proxy_password can
            # talk to this daemon.
            daemon._pyroHmacKey = self.config["proxy_password"].encode()

            daemon.PYRO_MAXCONNECTIONS = 20

            # Register this object under a fixed id and advertise the URI on
            # the public name server.
            self.uri = daemon.register(self, objectId="bigbrother")
            print(
                colored("\nBigBrother running : {}".format(self.uri), 'green'))
            self.public_pyro4ns.register("bigbrother", self.uri)
            # Blocks here servicing requests until the daemon is shut down.
            daemon.requestLoop()
        except Exception:
            # Report which interface failed, then let the caller handle it.
            print("Error creating proxy on interface",
                  self.config["interface"])
            raise
    def run_instance(self, container_name, keepalive_containers):
        """Start the privileged challenge container for *container_name*.

        On success the container is registered in *keepalive_containers*
        with the current timestamp and an nginx config is written for it.
        Docker build/API errors are logged, not raised.  Raises Exception
        when no free port is available.
        """
        self.lock.acquire()
        # Fix: the original raised on `port == -1` while still holding
        # self.lock, deadlocking every later call; try/finally now
        # guarantees the release.
        try:
            port = utils.get_free_port()

            if port == -1:
                raise Exception("failed to run instance, couldn't find available port")

            try:
                container = self.client.containers.run(
                    ports={f'{port}/tcp': f'{port}/tcp'},
                    privileged=True,
                    remove=True,
                    name=container_name,
                    detach=True,
                    image='privileged_vuln_host',
                    mem_limit='64m',
                    memswap_limit='64m',  # equal to mem_limit: no swap for the container
                    read_only=False,
                    cpu_period=100000,
                    cpu_quota=25000  # quota/period = 25% of one CPU
                )
                self.run_vulnerable_container(container, port)
                self.create_nginx_config(container_name, port)
                keepalive_containers[container.name] = datetime.datetime.now()
                app.logger.info(f'challenge container created for {container_name}')
            except (docker.errors.BuildError, docker.errors.APIError) as e:
                app.logger.error(f'container build failed for {container_name}: {e}')
            except Exception as e:
                app.logger.error(f'unknown error while building container for {container_name}: {e}')
        finally:
            self.lock.release()
        threading.Thread(target=self.win_check).start()
Exemple #26
0
    def __init__(self, id=None):
        """Initialise a DHT node: routing state, identity, maintenance threads
        and its backing storage."""
        # Chord-style routing state: finger table of M entries, predecessor,
        # and a successor list for fault tolerance.
        self._finger = [None] * M
        self._predecessor = None
        self.successor_list = []
        self._lock = Lock()
        self.ip = get_local_ip()
        self.port = get_free_port(self.ip)
        # NOTE(review): `id` shadows the builtin; kept for interface
        # compatibility.  When omitted the node id is derived from hash(self).
        self.id = hash(self) if id is None else id
        self._my_remote = RemoteObj(self.id, self.ip, self.port)

        # Maintenance daemon threads (created here, presumably started
        # elsewhere — the *_alive flags track each loop's state).
        self.stabilize_thread = Thread(
            target=lambda: self.stabilize_loop(interval=2.1), daemon=True)
        self.stabilize_alive = False
        self.fix_thread = Thread(target=lambda: self.fix_loop(interval=2.2),
                                 daemon=True)
        self.fix_alive = False
        self.successors_thread = Thread(
            target=lambda: self.successors_loop(interval=2.3), daemon=True)
        self.successors_alive = False
        self.start_service_thread = Thread(target=lambda: self.start_service(),
                                           daemon=True)

        # Local storage plus a remote handle to it for other nodes.
        self.storage = Storage(self.id)
        self.remote_storage = RemoteObj(self.storage.id, self.storage.ip,
                                        self.storage.port)
    def __init__(self, id, ip = None):
        """Per-node storage for files, tags and their backups.

        Creates ./storage/<id> from scratch (removing any leftovers from a
        previous run) and prepares the locks and dictionaries guarding each
        data set.  The service thread is created but not started here.
        """
        self.ip = get_local_ip() if ip is None else ip
        self.port = get_free_port(self.ip)
        self.id = id
        # File contents owned by this node plus replicas held for others.
        self.files = {}
        self.files_backups = {}
        self._files_backup_lock = Lock()
        self.storage_path = "./storage/%s"%(str(self.id))
        self._files_lock = Lock()
        self.files_uplading = {}
        # self.files_download_count = {}

        self.file_tags = {}
        self.file_tags_backups = {}
        self._file_tags_lock = Lock()

        self.tags = {}
        self.tags_backups = {}
        self._tags_lock = Lock()

        try:
            os.mkdir('storage')
        except FileExistsError:
            pass
        # Start from a clean directory for this node id.
        try:
            shutil.rmtree(self.storage_path)
        except FileNotFoundError:
            pass
        try:
            os.mkdir(self.storage_path)
        except OSError:
            # Fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; mkdir failures raise OSError.
            print("Creation of the directory %s failed" % self.storage_path)

        self._start_thread = Thread(target = lambda: self._start_service(), daemon=True)
Exemple #28
0
def setUpModule():
    """Start the shared server and generate the ISO images the tests use."""
    global test_server, model, host, port, ssl_port, cherrypy_port

    patch_auth()
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    cherrypy_port = get_free_port('cherrypy_port')
    test_server = run_server(host, port, ssl_port,
                             test_mode=True,
                             cherrypy_port=cherrypy_port,
                             model=model)

    # Create fake ISO to do the tests
    iso_gen.construct_fake_iso(fake_iso, True, '12.04', 'ubuntu')
    iso_gen.construct_fake_iso("/var/lib/libvirt/images/fedora.iso", True,
                               "17", "fedora")
Exemple #29
0
    def run_instance(self, user_id):
        """Start the runc challenge container for *user_id*.

        Writes an nginx config on success; Docker build/API errors are
        logged rather than raised.  Raises Exception when no free port is
        available.
        """
        self.lock.acquire()
        # Fix: the original raised on `port == -1` while still holding
        # self.lock (deadlock for all later callers); try/finally now
        # guarantees the release.
        try:
            port = utils.get_free_port()

            if port == -1:
                raise Exception(
                    "failed to run instance, couldn't find available port")

            try:
                container = self.client.containers.run(
                    ports={f'{port}/tcp': f'{port}/tcp'},
                    privileged=True,
                    remove=True,
                    name=user_id,
                    detach=True,
                    image='runc_vuln_host',
                    mem_limit='250m',
                    memswap_limit=
                    '250m',  # when swap limit is the same as mem, then container doesn't have access to swap
                    cpu_shares=512,  # cpu cycles limit
                    # storage_opt={'size': '512m'},  # https://stackoverflow.com/questions/33013904/how-to-limit-docker-filesystem-space-available-to-containers
                )
                self.run_vulnerable_container(container, port)
                self.create_nginx_config(user_id, port)
                app.logger.info(f'challenge container created for {user_id}')
            except (docker.errors.BuildError, docker.errors.APIError) as e:
                app.logger.error(f'container build failed for {user_id}: {e}')
            except Exception as e:
                app.logger.error(
                    f'unknown error while building container for {user_id}: {e}')
        finally:
            self.lock.release()
Exemple #30
0
def mainloop(server: str, port: int, token: bytes, worker_args: dict):
    """Connect a ClientWorker to *server*:*port* and run until its I/O
    threads exit.

    When worker_args['static'] is set, a local static HTTP server is
    started first and its host/port are tunnelled.  Login is retried up
    to three times with a 3-second pause; on repeated failure the
    function logs an error and returns.
    """
    loop_threads: "list[threading.Thread]"
    loop_threads = []
    if worker_args['static']:
        # Serve the static directory locally and expose it through the tunnel.
        worker_args['host'] = '127.0.0.1'
        worker_args['port'] = utils.get_free_port()
        t = threading.Thread(target=static_http_server,
                             args=(str(worker_args['static']),
                                   worker_args['port'], worker_args['host']))
        t.start()
    worker = ClientWorker(host=worker_args.get('host'),
                          port=worker_args.get('port'),
                          remote_port=worker_args.get('remote_port'),
                          mode=worker_args.get('mode'),
                          domain=worker_args.get('domain', ''),
                          bufsize=worker_args.get('bufsize', 1024))
    # Up to three login attempts with a 3s backoff.  (Local renamed from
    # `errno`, which shadowed the stdlib module of the same name.)
    for attempt in range(3):
        err = worker.login(server, port, token)
        logging.debug('mainloop login, errno ' + str(err))
        if err == 0:
            break
        if attempt == 2:
            logging.error('login failure times exceeded!')
            return
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning("login failure: %d, retrying..." % err)
        time.sleep(3)
    r_thread, w_thread = worker.run()
    loop_threads.append(r_thread)
    loop_threads.append(w_thread)

    for t in loop_threads:
        t.join()
Exemple #31
0
def setUpModule():
    """Start a plain-HTTP test server (ssl_port left as None)."""
    global test_server, model, host, port, ssl_port

    model = kimchi.mockmodel.MockModel("/tmp/obj-store-test")
    host = "127.0.0.1"
    port = utils.get_free_port("http")
    ssl_port = None
    test_server = utils.run_server(host, port, ssl_port,
                                   test_mode=True, model=model)
Exemple #32
0
def setUpModule():
    """Start an HTTP-only test server (no SSL port) with non-sudo auth."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
    host = '127.0.0.1'
    port = get_free_port('http')
    test_server = run_server(host, port, None,
                             test_mode=True, model=model)
Exemple #33
0
def setUpModule():
    """HTTP-only variant of the shared test-server setup."""
    global test_server, model, host, port, ssl_port

    patch_auth(sudo=False)
    model = kimchi.mockmodel.MockModel("/tmp/obj-store-test")
    host = "127.0.0.1"
    port = get_free_port("http")
    test_server = run_server(host, port, None,
                             test_mode=True, model=model)
Exemple #34
0
 def test_server_start(self):
     """
     Test that we can start a server and receive HTTP:200.
     """
     host = '127.0.0.1'
     port = utils.get_free_port('http')
     ssl_port = utils.get_free_port('https')
     model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
     s = utils.run_server(host, port, ssl_port, test_mode=True, model=model)
     # The original `except: raise` was a no-op; `finally` alone already
     # guarantees cleanup while letting any exception propagate.
     try:
         resp = utils.request(host, ssl_port, '/')
         # assertEquals is a deprecated alias (removed in Python 3.12).
         self.assertEqual(200, resp.status)
     finally:
         os.unlink('/tmp/obj-store-test')
         s.stop()
Exemple #35
0
 def test_server_start(self):
     """
     Test that we can start a server and receive HTTP:200.
     """
     host = '127.0.0.1'
     port = utils.get_free_port('http')
     ssl_port = utils.get_free_port('https')
     model = kimchi.mockmodel.MockModel('/tmp/obj-store-test')
     s = utils.run_server(host, port, ssl_port, test_mode=True, model=model)
     # Dropped the redundant bare `except: raise`; `finally` still runs the
     # cleanup and exceptions still propagate.
     try:
         resp = utils.request(host, ssl_port, '/')
         # assertEquals is a deprecated alias (removed in Python 3.12).
         self.assertEqual(200, resp.status)
     finally:
         os.unlink('/tmp/obj-store-test')
         s.stop()
 def register(self, device):
     """Write a node config for *device* and start an Appium node for it.

     The config is written to a persistent temp file (delete=False) whose
     path is handed to the node.
     """
     config_file = tempfile.NamedTemporaryFile(mode="w+", delete=False)
     port = get_free_port()
     config = self.generate_config(device, port)
     config_file.write(config)
     config_file.flush()
     # Fix: close the handle so the file descriptor isn't leaked;
     # delete=False keeps the file on disk for the node to read by name.
     config_file.close()
     node = AppiumNode(port, device, config_file.name, self.additional_args)
     node.start()
     self.nodes.append(node)
Exemple #37
0
 def _command(self):
     """Assemble the appium CLI invocation for this node."""
     cmd = [self.appium_executable,
            "--port", str(self.port),
            "--bootstrap-port", str(get_free_port()),
            "--udid", self.device.name]
     if self.config_file:
         cmd.extend(["--nodeconfig", self.config_file])
     return cmd
Exemple #38
0
 def __init__(self, appium_port, device, config_file=None, generate_bootstrap_port=True, additional_args=None):
     """Hold the launch parameters for one Appium node bound to *device*."""
     self.appium_port = appium_port
     self.device = device
     self.config_file = config_file
     self.generate_bootstrap_port = generate_bootstrap_port
     self.additional_args = additional_args
     # Per-device logger; log files live under LOG_DIR, one per device name.
     self.log = logging.getLogger(self.device.name)
     if not os.path.exists(LOG_DIR):
         os.makedirs(LOG_DIR)
     self.logfile = os.sep.join([LOG_DIR, device.name])
     # NOTE(review): bootstrap_port only exists when
     # generate_bootstrap_port is true — readers must not assume the
     # attribute is always present.
     if self.generate_bootstrap_port:
         self.bootstrap_port = get_free_port()
Exemple #39
0
    def _command(self):
        """Build the appium launch command line for this node."""
        cmd = [self.appium_executable,
               "--port", str(self.port),
               "--bootstrap-port", str(get_free_port()),
               "--udid", self.device.name]

        if self.additional_args:
            cmd.extend(self.additional_args)

        if self.config_file:
            cmd.extend(["--nodeconfig", self.config_file])
        return cmd
Exemple #40
0
    def run_instance(self, container_name, keepalive_containers):
        """Start the experimental-mode challenge container via the docker CLI.

        Registers the spawned container in *keepalive_containers* and writes
        an nginx config for it.  Build errors are logged, not raised; raises
        Exception when no free port is available.
        """
        self.lock.acquire()
        # Fix: the original raised on `port == -1` while still holding
        # self.lock, deadlocking every later call; try/finally guarantees
        # the release now.
        try:
            port = utils.get_free_port()

            if port == -1:
                raise Exception(
                    "failed to run instance, couldn't find available port")

            try:
                # Running host container is implemented with subprocess call, because
                # docker python SDK do not support "--experimental" flag and I couldn't
                # find the way, to enable it through some config file or env variable.
                # SECURITY NOTE(review): container_name is interpolated into a
                # shell command string — it must be validated/sanitized by the
                # caller before reaching this point.
                run_cmd = ' '.join([
                    '/usr/bin/docker',
                    'run',
                    f'-p {port}:{port}',
                    '--privileged',
                    '--rm',
                    f'--name {container_name}',
                    '-d',
                    '-e DOCKER_HOST=unix:///run/user/1000/docker.sock',
                    '--memory=64m',
                    '--memory-swap=64m',
                    # '--cpus=0.25',
                    'runc_vuln_host',
                    '--experimental',
                ])
                result = subprocess.getoutput(run_cmd)
                # Docker client prints warning when memory limitations are used, so
                # we cannot be sure that command output will be only id of spawned
                # container. That's why regex below is required to find this id.
                # Example:
                #   WARNING: Your kernel does not support swap limit capabilities or
                #   the cgroup is not mounted. Memory limited without swap.
                #   ad9d0928ad507baa9e4fadcf6a21c953248bcdfc1dd48988aad98efac870661d
                container_id = re.search('[a-z0-9]{64}', result).group(0)
                container = self.client.containers.get(container_id)
                self.run_vulnerable_container(container, port)
                self.create_nginx_config(container_name, port)
                keepalive_containers[container.name] = datetime.datetime.now()
                app.logger.info(
                    f'challenge container created for {container_name}')
            except (docker.errors.BuildError, docker.errors.APIError) as e:
                app.logger.error(
                    f'container build failed for {container_name}: {e}')
            except Exception as e:
                app.logger.error(
                    f'unknown error while building container for {container_name}: {e}'
                )
        finally:
            self.lock.release()
Exemple #41
0
 def test_server_start(self):
     """
     Test that we can start a server and receive a response.  Right now we
     have no content so we expect HTTP:404
     """
     host = '127.0.0.1'
     port = utils.get_free_port()
     s = utils.run_server(host, port, test_mode=True)
     # Dropped the redundant bare `except: raise`; `finally` alone already
     # stops the server while letting exceptions propagate.
     try:
         resp = utils.request(host, port, '/')
         data = json.loads(resp.read())
         # assertEquals is a deprecated alias (removed in Python 3.12).
         self.assertEqual(200, resp.status)
         self.assertEqual('localhost', data['hostname'])
     finally:
         s.stop()
Exemple #42
0
def main():
    """Parse CLI options and launch SQuAD distillation training.

    Spawns one worker process per available GPU via
    torch.multiprocessing.spawn, or runs a single process when at most
    one GPU is usable.
    """
    parser = argparse.ArgumentParser()


    # Data, model and output locations.
    parser.add_argument("--squad_train_data", default=None, type=str, required=True,help="SQuAD json for training. (train-v1.1.json)")
    parser.add_argument("--squad_dev_data", default=None, type=str, required=True,help="SQuAD json for evaluation. (dev-v1.1.json)")
    parser.add_argument("--squad_eval_script", default=None, type=str, required=True,help="SQuAD evaluation script. (evaluate-v1.1.py)")
    parser.add_argument("--model_student", default="bert-large-uncased-whole-word-masking", type=str, required=False,help="Path to pre-trained model")
    parser.add_argument("--model_teacher", default="bert-large-uncased-whole-word-masking", type=str, required=False,help="Path to pre-trained model for supervision")
    parser.add_argument("--output_dir", default='bert-large-uncased-whole-word-masking-qa-squad', type=str, required=True,help="The output directory for embedding model")
    # Batching and optimisation hyper-parameters.
    parser.add_argument("--total_train_batch_size", default=48, type=int,help="Batch size to make one optimization step.")
    parser.add_argument("--per_gpu_train_batch_size", default=2, type=int,help="Batch size per GPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=16, type=int,help="Batch size per GPU for evaluation.")
    parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial learning rates for Adam.")
    parser.add_argument("--num_train_epochs", default=2.0, type=float,help="Number of epochs for one stage train")
    parser.add_argument("--no_cuda", action='store_true',help="Disable GPU calculation")
    parser.add_argument("--max_seq_length_q", default=64, type=int,help="The maximum total input sequence length for question")
    parser.add_argument("--max_seq_length_c", default=384, type=int,help="The maximum total input sequence length for context + question")

    # Distillation losses: L2 between hidden states and KD on output logits.
    parser.add_argument("--supervision_weight", default=0.02, type=float, required=False, help="set to more than 0 to use l2 loss between hidden states")
    parser.add_argument("--kd_weight", default=1, type=float, required=False, help="set to more than 0 to use kd loss between output logits")

    parser.add_argument("--loss_cfg", default="", type=str,help="loss type.")
    parser.add_argument("--nncf_config", default=None, type=str,help="config json file for quantization by nncf.")
    parser.add_argument("--freeze_list", default="", type=str,help="list of subnames to define parameters that will not be tuned")


    args = parser.parse_args()

    # Count usable GPUs; 0 disables CUDA entirely.
    if torch.cuda.is_available() and not args.no_cuda:
        args.n_gpu = torch.cuda.device_count()
    else:
        args.n_gpu = 0

    # Log the full configuration, sorted by parameter name.
    for k,v in sorted(vars(args).items(), key=lambda x:x[0]):
        printlog('parameter',k,v)

    if args.n_gpu > 1:
        # One worker per GPU; a free port is picked for their rendezvous.
        port = utils.get_free_port()
        printlog("torch.multiprocessing.spawn is started")
        torch.multiprocessing.spawn(process, args=(args,port,), nprocs=args.n_gpu, join=True)
        printlog("torch.multiprocessing.spawn is finished")
    else:
        printlog("single process mode")
        process(-1, args, None)
Exemple #43
0
def _start_server():
    """Start the SuperStyler template HTTP server on a daemon thread.

    Re-picks the module-global `port` automatically unless
    `use_manual_port` is set; on bind failure shows an Anki info dialog
    instead of raising.

    NOTE(review): Python 2 syntax (`except ..., e`) — left untouched.
    """
    global port
    
    if not use_manual_port:
        port = utils.get_free_port()

    try:
        # Bind on all interfaces and serve forever in the background; the
        # daemon flag lets the host application exit without joining.
        ip = "0.0.0.0"
        ts = TemplateServer((ip, port), TemplateHandler)    
        t_name = "SuperStyler template server thread"
        t = threading.Thread(target=ts.serve_forever, name=t_name)
        t.daemon = True
        t.start()
    except SocketServer.socket.error, e:
        from aqt.utils import showInfo
        s = ("SuperStyler failed to open a server. Make sure the chosen port "
             "(%s) is not in use.\n\nError was: %s") % (port, str(e))
        showInfo(s)
Exemple #44
0
    def start(self):
        """Launch torrent2http on a free port and wait for it to answer.

        Returns True once the helper responds, False when already shutting
        down; raises TorrentError if it cannot be started or does not come
        up within ~5 seconds.
        """
        if not self._shutdown:
            log('(Torrent) Find free port', LOGLEVEL.INFO)
            port = get_free_port()

            log('(Torrent) Starting torrent2http', LOGLEVEL.INFO)
            startupinfo = None
            if Platform.system == "windows":
                # Suppress the console window of the child on Windows.
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= 1
                startupinfo.wShowWindow = 0

            if _settings.debug:
                self._logpipe = LogPipe(self._debug)

            torrent_options = self._mediaSettings.get_torrent_options(
                self._magnet, port)

            try:
                self._process = subprocess.Popen(torrent_options,
                                                 stderr=self._logpipe,
                                                 stdout=self._logpipe,
                                                 startupinfo=startupinfo)
            except Exception as e:
                # NOTE(review): `e` is unused; the message is rebuilt from
                # sys.exc_info() instead.
                raise TorrentError("Can't start torrent2http: %s" %
                                   str(sys.exc_info()[1]))
            self._url = "http://127.0.0.1:%s/" % port

            # Poll until torrent2http answers, giving up after 5 seconds or
            # if the child process died.
            start = time.time()
            while not self._shutdown:
                if (time.time() - start) > 5 or not self.isAlive():
                    raise TorrentError("Can't start torrent2http")
                if not self.status(1)['state'] == self.NO_CONNECTION:
                    log("(Torrent) torrent2http successfully started")
                    return True
        return False
    def run(self, ):
        """Continuously register Appium nodes for attached Android devices.

        Loops forever: starts a node for each newly seen device and stops
        nodes whose device disappeared.  Exits cleanly on interrupt or
        StopAutoregister.
        """
        log.info("start registring devices...")
        try:
            while True:
                # Nodes we already run, keyed by device name; entries left
                # over after the scan belong to devices that were unplugged.
                already_handled_devices = {node.device.name: node for node in self.nodes}
                for device in android_devices():
                    if device.name in already_handled_devices.keys():
                        del already_handled_devices[device.name]
                        continue

                    # New device: write its node config to a persistent temp
                    # file and start an Appium node for it.
                    config_file = tempfile.NamedTemporaryFile(mode="w+", delete=False)
                    port = get_free_port()
                    config = self.generate_config(device, port)
                    config_file.write(config)
                    config_file.flush()
                    node = AppiumNode(port, device, config_file.name)
                    node.start()
                    self.nodes.append(node)

                # Stop and forget nodes whose device is gone.
                for node in already_handled_devices.values():
                    node.stop()
                    self.nodes.remove(node)
        except (StopAutoregister, KeyboardInterrupt, SystemExit):
            self.stop()
Exemple #46
0
def start_name_server():
    """Run a Pyro4 name-server loop bound to this machine's IP on a free port."""
    host = get_local_ip()
    Pyro4.naming.startNSloop(host, get_free_port(host))
Exemple #47
0
 def __init__(self, ip = None):
     """Set up node identity: bind address, free port, and the (not yet
     started) daemon thread that will run the service."""
     # fall back to this machine's own address when no IP is supplied
     self.ip = ip if ip is not None else get_local_ip()
     self.port = get_free_port(self.ip)
     # identity-derived id for this instance
     self.id = hash(self)
     # daemon thread so it never blocks interpreter shutdown
     self.start_service_thread = Thread(target=lambda: self._start_service(), daemon=True)
     self.last_list = []
Exemple #48
0
                    with Pyro4.Proxy(ns.list()[server]) as p:
                        remote_node = p.get_info()
                    break
                except:
                    pass
    except Exception as x:
        print(x)

    if start_ns:
        start_name_server_thread = Thread(target=lambda: start_name_server(),
                                          daemon=True)
        start_name_server_thread.start()
        time.sleep(2)

    node = Node(sys.argv, remote_node)

    ip = get_local_ip()
    port = get_free_port(ip)

    print(node.dht)

    daemon = Pyro4.Daemon(host=ip, port=port)
    start_custom_serveSimple(ip, port, daemon, node, f'run_node{ip}:{port}')
    # Pyro4.Daemon.serveSimple(
    #     {
    #         node: f'run_node'#-{ip}:{port}'
    #     },
    #     ns = True,
    #     daemon = daemon
    # )
def main():
    """Parse CLI options and launch SQuAD model-packing training.

    With more than one visible GPU, spawns one worker per GPU via
    torch.multiprocessing (rendezvous on a freshly allocated free port);
    otherwise runs a single process directly with rank -1.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--squad_train_data",
                        default=None,
                        type=str,
                        required=True,
                        help="SQuAD json for training. (train-v1.1.json)")
    parser.add_argument("--squad_dev_data",
                        default=None,
                        type=str,
                        required=True,
                        help="SQuAD json for evaluation. (dev-v1.1.json)")
    parser.add_argument("--squad_eval_script",
                        default=None,
                        type=str,
                        required=True,
                        help="SQuAD evaluation script. (evaluate-v1.1.py)")
    parser.add_argument("--model_student",
                        default=None,
                        type=str,
                        required=True,
                        help="Path to pre-trained model")
    parser.add_argument("--model_teacher",
                        default=None,
                        type=str,
                        required=True,
                        help="Path to pre-trained model for supervision")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory for packed model")
    parser.add_argument("--max_seq_length_c",
                        default=384,
                        type=int,
                        help="The maximum tokens for context")
    parser.add_argument("--max_seq_length_q",
                        default=64,
                        type=int,
                        help="The maximum tokens for question")
    parser.add_argument("--total_train_batch_size",
                        default=32,
                        type=int,
                        help="Batch size to make one optimization step")
    parser.add_argument("--per_gpu_train_batch_size",
                        default=4,
                        type=int,
                        help="Batch size per GPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size",
                        default=32,
                        type=int,
                        help="Batch size per GPU for evaluation.")
    parser.add_argument("--learning_rate",
                        default=5e-4,
                        type=float,
                        help="The learning rates for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=16.0,
                        type=float,
                        help="Number of epochs for one stage train")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Disable GPU calculation")

    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="seed for random generators")
    parser.add_argument(
        "--pack_cfg",
        default=
        "num_hidden_layers:12,ff_iter_num:4,num_attention_heads:8,hidden_size:512,pack_emb:1,hidden_act:orig",
        type=str,
        help="string for pack configuration")
    parser.add_argument(
        "--loss_weight_alpha",
        default=1.5,
        type=float,
        help=
        "alpha to define weights for losses on the final tune. w_i = alpha^i. higher weight for layers closer to output"
    )
    parser.add_argument(
        "--total_train_batch_size_for_tune",
        default=None,
        type=int,
        help="Batch size for one optimization step for final tune.")
    parser.add_argument(
        "--learning_rate_for_tune",
        default=None,
        type=float,
        help="The initial learning rates for Adam for final model tune.")

    args = parser.parse_args()

    # use all visible GPUs unless explicitly disabled
    if torch.cuda.is_available() and not args.no_cuda:
        args.n_gpu = torch.cuda.device_count()
    else:
        args.n_gpu = 0

    # log the full configuration, sorted by parameter name
    for k, v in sorted(vars(args).items(), key=lambda x: x[0]):
        printlog('parameter', k, v)

    if args.n_gpu > 1:
        # free TCP port for the distributed workers' rendezvous
        port = utils.get_free_port()
        printlog("torch.multiprocessing.spawn is started")
        torch.multiprocessing.spawn(process,
                                    args=(
                                        args,
                                        port,
                                    ),
                                    nprocs=args.n_gpu,
                                    join=True)
        printlog("torch.multiprocessing.spawn is finished")
    else:
        # Fix: process expects (rank, args, port) — the spawn path above
        # passes a port and the sibling entry point calls
        # process(-1, args, 0); the original process(-1, args) omitted it.
        process(-1, args, 0)
Exemple #50
0
def main():
    """Parse CLI options and launch embedding-model training on SQuAD.

    Builds the argument set (data paths, batch sizes, hard-negative-mining
    options, loss/nncf configuration), then either spawns one worker per
    visible GPU via torch.multiprocessing or runs a single process with
    rank -1.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--squad_train_data",
                        default=None,
                        type=str,
                        required=True,
                        help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--squad_dev_data",
                        default=None,
                        type=str,
                        required=True,
                        help="SQuAD json for evaluation. E.g., dev-v1.1.json")
    parser.add_argument("--model_student",
                        default="bert-large-uncased-whole-word-masking",
                        type=str,
                        required=False,
                        help="Path to pre-trained model")
    parser.add_argument("--model_teacher",
                        default="bert-large-uncased-whole-word-masking",
                        type=str,
                        required=False,
                        help="Path to pre-trained model for supervision")
    parser.add_argument(
        "--output_dir",
        default='bert-large-uncased-whole-word-masking-emb-squad',
        type=str,
        required=True,
        help="The output directory for embedding model")
    parser.add_argument(
        "--max_seq_length_q",
        default=32,
        type=int,
        help="The maximum total input sequence length for question")
    parser.add_argument(
        "--max_seq_length_c",
        default=384,
        type=int,
        help="The maximum total input sequence length for context")
    parser.add_argument("--total_train_batch_size",
                        default=32,
                        type=int,
                        help="Batch size to make one optimization step.")
    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=4,
        type=int,
        help="Batch size for one GPU inference on train stage.")
    parser.add_argument(
        "--per_gpu_eval_batch_size",
        default=32,
        type=int,
        help="Batch size for one GPU inference on evaluation stage.")
    parser.add_argument("--learning_rate",
                        default=5e-4,
                        type=float,
                        help="The initial learning rates for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=8.0,
                        type=float,
                        help="Number of epochs to train")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Disable GPU calculation")

    # hard-negative-mining (HNM) options
    parser.add_argument("--hnm_batch_size",
                        default=32,
                        type=int,
                        help="number of mined hard negatives for one gpu")
    parser.add_argument(
        "--hnm_num",
        default=256,
        type=int,
        help="number of mined hard negatives for each optimization step.")
    parser.add_argument(
        "--hnm_hist_num",
        default=32,
        type=int,
        help="number of mined hard negatives history optimization steps.")
    parser.add_argument(
        "--hnm_hist_alpha",
        default=1.0,
        type=float,
        help="multiplier to increase distance for negatives for older steps.")

    parser.add_argument("--loss_cfg",
                        default="",
                        type=str,
                        help="loss configuration.")
    parser.add_argument("--nncf_config",
                        default=None,
                        type=str,
                        help="config json file for nncf quantization.")
    parser.add_argument(
        "--freeze_list",
        default="",
        type=str,
        help=
        "list of subnames to define model parameters that will not be tuned")
    parser.add_argument(
        "--supervision_weight",
        default=0,
        type=float,
        required=False,
        help="set to more than 0 to use l2 loss between hidden states")

    args = parser.parse_args()

    # use all visible GPUs unless explicitly disabled
    if torch.cuda.is_available() and not args.no_cuda:
        args.n_gpu = torch.cuda.device_count()
    else:
        args.n_gpu = 0

    # log the full configuration, sorted by parameter name
    for k, v in sorted(vars(args).items(), key=lambda x: x[0]):
        printlog('parameter', k, v)

    if args.n_gpu > 1:
        # free TCP port for the distributed workers' rendezvous
        port = utils.get_free_port()
        printlog("torch.multiprocessing.spawn is started")
        torch.multiprocessing.spawn(process,
                                    args=(
                                        args,
                                        port,
                                    ),
                                    nprocs=args.n_gpu,
                                    join=True)
        printlog("torch.multiprocessing.spawn is finished")
    else:
        # single-process mode: rank -1, port 0 (no rendezvous needed)
        process(-1, args, 0)
def main(args=None):
    """Parse CLI options and launch noise-suppression model training.

    Args:
        args: optional list of argument strings; defaults to sys.argv
            (standard argparse behavior when None is passed).

    With more than one visible GPU, spawns one worker per GPU via
    torch.multiprocessing; otherwise runs a single process with rank -1.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_desc",
        default="PoCoNetLikeModel",
        type=str,
        required=False,
        help="model directory or model json or model desc string")
    parser.add_argument(
        "--dns_datasets",
        type=str,
        default=None,
        required=True,
        help="DNS-Chalange datasets directory")
    parser.add_argument(
        "--eval_data",
        default=None,
        type=str,
        required=False,
        help="synthetic dataset to validate <dns-datasets>/ICASSP_dev_test_set/track_1/synthetic")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory for model")
    parser.add_argument(
        "--total_train_batch_size",
        default=128,
        type=int,
        help="Batch size to make one optimization step.")
    parser.add_argument(
        "--per_gpu_train_batch_size",
        default=6,
        type=int,
        help="Batch size per GPU for training.")
    parser.add_argument(
        "--learning_rate",
        default=1e-4,
        type=float,
        help="The initial learning rates for optimizer.")
    parser.add_argument(
        "--weight_decay",
        default=1e-2,
        type=float,
        help="The weight decay for optimizer")
    parser.add_argument(
        "--num_train_epochs",
        default=10.0,
        type=float,
        help="Number of epochs to train")
    parser.add_argument(
        "--no_cuda",
        action='store_true',
        help="Disable GPU calculation")
    parser.add_argument(
        "--seed",
        default=42,
        type=int,
        help="Seed for different inittializations")
    parser.add_argument(
        "--logacc",
        default=50,
        type=int,
        help="Number of optimization steps before log")

    parser.add_argument(
        "--size_to_read",
        default=4.0,
        type=float,
        help="number of second in batch to train infer")

    # audio-mixing options for synthesizing noisy training input
    parser.add_argument(
        "--snr_min",
        default=-20,
        type=float,
        help="Minimal SNR value (dB) for mixing clean signal and noise")
    parser.add_argument(
        "--snr_max",
        default=+10,
        type=float,
        help="Maximal SNR value (dB) for mixing clean signal and noise")
    parser.add_argument(
        "--target_min",
        default=-30,
        type=float,
        help="Minimal (dBFS) for input mixed signal")
    parser.add_argument(
        "--target_max",
        default=-5,
        type=float,
        help="Maximal (dBFS) for input mixed signal")
    parser.add_argument(
        "--clip_prob",
        default=0.1,
        type=float,
        help="Probability to clip input mixed signal")

    args = parser.parse_args(args)

    # use all visible GPUs unless explicitly disabled
    if torch.cuda.is_available() and not args.no_cuda:
        args.n_gpu = torch.cuda.device_count()
    else:
        args.n_gpu = 0

    # log the full configuration, sorted by parameter name
    for k,v in sorted(vars(args).items(), key=lambda x:x[0]):
        printlog('parameter',k,v)

    if args.n_gpu > 1:
        # free TCP port for the distributed workers' rendezvous
        port = utils.get_free_port()
        printlog("torch.multiprocessing.spawn is started")
        torch.multiprocessing.spawn(process, args=(args,port,), nprocs=args.n_gpu, join=True)
        printlog("torch.multiprocessing.spawn is finished")
    else:
        printlog("single process mode")
        # single-process mode: rank -1, no rendezvous port
        process(-1, args, None)