Example #1
    def __get_modified_entity(self, change):
        try:
            metadata = self.database.session.query(server.models.EntityMetadata)\
                .filter(server.models.EntityMetadata.couchdb_id == change.doc_id)\
                .one_or_none()
        except MultipleResultsFound:
            logger.warning(u'Multiple entities were found for doc {}. '
                'Ignoring change'.format(change.doc_id))
            return None

        if metadata is not None:
            # Obtain the proper table on which to perform the entity operation
            entity_cls = server.models.FaradayEntity.get_entity_class_from_type(
                metadata.document_type)

            entity = self.database.session.query(entity_cls)\
                .join(server.models.EntityMetadata)\
                .filter(server.models.EntityMetadata.couchdb_id == change.doc_id)\
                .one()

            return entity

        else:
            logger.info(u'Doc {} was not found in the database'.format(
                change.doc_id))
            return None
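
The try/except structure above follows SQLAlchemy's query terminators: .one_or_none() returns None for zero rows but still raises MultipleResultsFound when duplicates exist, while .one() additionally raises NoResultFound for zero rows. A minimal, self-contained sketch of that behaviour (the toy model and in-memory SQLite database are illustrative assumptions, not part of the Faraday code; requires a recent SQLAlchemy):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound

Base = declarative_base()

class EntityMetadata(Base):  # toy stand-in for server.models.EntityMetadata
    __tablename__ = 'entity_metadata'
    id = Column(Integer, primary_key=True)
    couchdb_id = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
session.add_all([EntityMetadata(couchdb_id='dup'), EntityMetadata(couchdb_id='dup')])
session.commit()

# Zero rows: one_or_none() returns None, one() raises NoResultFound
assert session.query(EntityMetadata).filter_by(couchdb_id='missing').one_or_none() is None
try:
    session.query(EntityMetadata).filter_by(couchdb_id='missing').one()
except NoResultFound:
    print('one() raised NoResultFound')

# Two rows: both terminators raise MultipleResultsFound
try:
    session.query(EntityMetadata).filter_by(couchdb_id='dup').one_or_none()
except MultipleResultsFound:
    print('one_or_none() raised MultipleResultsFound')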
Example #2
    def __get_modified_entity(self, change):
        try:
            metadata = self.database.session.query(server.models.EntityMetadata)\
                .filter(server.models.EntityMetadata.couchdb_id == change.doc_id)\
                .one_or_none()
        except MultipleResultsFound:
            logger.warning(u'Multiple entities were found for doc {}. '
                'Ignoring change'.format(change.doc_id))
            return None

        if metadata is not None:
            # Obtain the proper table on which to perform the entity operation
            entity_cls = server.models.FaradayEntity.get_entity_class_from_type(
                metadata.document_type)
            
            entity = self.database.session.query(entity_cls)\
                .join(server.models.EntityMetadata)\
                .filter(server.models.EntityMetadata.couchdb_id == change.doc_id)\
                .one()

            return entity

        else:
            logger.info(u'Doc {} was not found in the database'.format(change.doc_id))
            return None
Example #3
    def __iter__(self):
        while not self.__stop:
            try:
                # TODO: Connection timeout is too long.
                self.__response = requests.get(self.__url,
                                               params=self.__params,
                                               stream=True,
                                               auth=get_auth_info())

                for raw_line in self.__response.iter_lines():
                    line = self.__sanitize(raw_line)
                    if not line:
                        continue

                    change = self.__parse_change(line)
                    if not change:
                        continue

                    yield change

            except Exception as e:
                import traceback
                logger.debug(traceback.format_exc())

                # Close everything but keep retrying
                self.stop()
                self.__stop = False

                logger.warning(
                    u"Lost connection to CouchDB. Retrying in 5 seconds...")
                time.sleep(5)
                logger.info(u"Retrying...")
Example #4
    def run(self):
        for change_doc in self.__stream:
            try:
                if self.CHANGE_CLS.validate(change_doc):
                    self.__changes_callback(self.CHANGE_CLS(change_doc))
                else:
                    logger.debug(u'Ignoring change: {}'.format(change_doc))

            except Exception as e:
                import traceback
                logger.debug(traceback.format_exc())
                logger.warning(
                    u"Error while processing change. Ignoring. Offending change: {}"
                    .format(change_doc))

                if change_doc.get('error', None):
                    if change_doc.get('error') == 'unauthorized':
                        logger.error(u"Unauthorized access to CouchDB. Make sure faraday-server's"\
                            " configuration file has CouchDB admin's credentials set")
                        thread.interrupt_main()

                    # TODO: A proper fix is needed here
                    elif change_doc.get('reason') == 'no_db_file':
                        self.__stream.stop()
                        break
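
The error handling at the end of run() keys off documents that CouchDB itself pushes into the feed when something goes wrong. Hedged illustrations of the two shapes the code checks for (field values are typical; the exact "reason" wording varies by CouchDB version):

# Credentials rejected by CouchDB: the handler interrupts the main thread
unauthorized_change = {"error": "unauthorized",
                       "reason": "You are not authorized to access this db."}

# Workspace database deleted while the feed was open: the handler only stops
# this monitor's stream instead of killing the process
missing_db_change = {"error": "not_found", "reason": "no_db_file"}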
Example #5
    def __iter__(self):
        while not self.__stop:
            try:
                # TODO: Connection timeout is too long.
                self.__response = requests.get(
                    self.__url, params=self.__params,
                    stream=True, auth=get_auth_info())

                for raw_line in self.__response.iter_lines():
                    line = self.__sanitize(raw_line) 
                    if not line:
                        continue

                    change = self.__parse_change(line)
                    if not change:
                        continue

                    yield change

            except Exception as e:
                import traceback
                logger.debug(traceback.format_exc())

                # Close everything but keep retrying
                self.stop()
                self.__stop = False

                logger.warning(u"Lost connection to CouchDB. Retrying in 5 seconds...")
                time.sleep(5)
                logger.info(u"Retrying...")
Example #6
 def get_document(self, doc_id):
     try:
         return self.__workspace.get(doc_id)
     except ResourceNotFound:
         logger.warning(
             u"Document {} was not found in CouchDB for Workspace {}".
             format(doc_id, self.__ws_name))
         return {}
Example #7
def upload_views(workspace):
    """ Upload views with couchdb behind of ViewsManager """
    vmanager = ViewsManager()
    try:
        vmanager.addViews(workspace)
    except:
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning("Views documents couldn't be uploaded. You need to be an admin to do it")
Example #8
 def get_document_metadata(self, document_id):
     metadata = None
     try:
         metadata = self.__db_conn.session.query(server.models.EntityMetadata)\
                                          .filter(server.models.EntityMetadata.couchdb_id == document_id)\
                                          .one_or_none()
     except MultipleResultsFound:
         logger.warning(u'Multiple entities were found for doc {}. '
             'Ignoring change'.format(document_id))
     return metadata
Example #9
 def get_document_metadata(self, document_id):
     metadata = None
     try:
         metadata = self.__db_conn.session.query(server.models.EntityMetadata)\
                                          .filter(server.models.EntityMetadata.couchdb_id == document_id)\
                                          .one_or_none()
     except MultipleResultsFound:
         logger.warning(u'Multiple entities were found for doc {}. '
             'Ignoring change'.format(document_id))
     return metadata
Example #10
def upload_views(workspace):
    """ Upload views with couchdb behind of ViewsManager """
    vmanager = ViewsManager()
    try:
        vmanager.addViews(workspace)
    except:
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning(
            "Views documents couldn't be uploaded. You need to be an admin to do it"
        )
Example #11
def ask_to_install(missing_packages):
    logger.warning("The following packages are not installed:")
    for package in missing_packages:
        logger.warning("%s" % package)

    if query_yes_no("Do you want to install them?", default="no"):
        checker = DependencyChecker(server.config.REQUIREMENTS_FILE)
        checker.install_packages(missing_packages)
        return True

    return False
Example #12
    def __create_and_import_workspace(self, ws_name):
        new_db_conn = Connector(ws_name)

        if new_db_conn.exists():
            # TODO(mrocha): if somehow this happens, then we should check for integrity and reimport
            # if necessary. After that we should add it into the databases dict
            logger.warning(u"Workspace {} already exists but wasn't registered at startup".format(ws_name))
        else:
            server.importer.import_workspace_into_database(ws_name, new_db_conn)

        self.__init_workspace(ws_name, db_conn=new_db_conn)
Example #13
def push_reports():
    vmanager = ViewsManager()
    try:
        logger.debug(u'Pushing Reports DB into CouchDB')
        couchdb_server = CouchDBServer()
        workspace = couchdb_server.get_or_create_db('reports')
        vmanager.addView(config.REPORTS_VIEWS_DIR, workspace)
    except:
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning("Reports database couldn't be uploaded. You need to be an admin to do it")
Example #14
def ask_to_install(missing_packages):
    logger.warning("The following packages are not installed:")
    for package in missing_packages:
        logger.warning("%s" % package)

    if query_yes_no("Do you want to install them?", default="no"):
        checker = DependencyChecker(server.config.REQUIREMENTS_FILE)
        checker.install_packages(missing_packages)
        return True

    return False
Example #15
def push_reports():
    vmanager = ViewsManager()
    try:
        logger.debug(u'Pushing Reports DB into CouchDB')
        couchdb_server = CouchDBServer()
        workspace = couchdb_server.get_or_create_db('reports')
        vmanager.addView(config.REPORTS_VIEWS_DIR, workspace)
    except:
        import traceback
        logger.debug(traceback.format_exc())
        logger.warning(
            "Reports database couldn't be uploaded. You need to be an admin to do it"
        )
Example #16
    def __create_and_import_workspace(self, ws_name):
        new_db_conn = Connector(ws_name)

        if new_db_conn.exists():
            # TODO(mrocha): if somehow this happens, then we should check for integrity and reimport
            # if necessary. After that we should add it into the databases dict
            logger.warning(
                u"Workspace {} already exists but wasn't registered at startup"
                .format(ws_name))
        else:
            server.importer.import_workspace_into_database(
                ws_name, new_db_conn)

        self.__init_workspace(ws_name, db_conn=new_db_conn)
Example #17
    def run(self):
        for change_doc in self.__stream:
            try:
                self.__changes_callback(self.CHANGE_CLS(change_doc))
            except Exception as e:
                import traceback
                logger.debug(traceback.format_exc())
                logger.warning(u"Error while processing change. Ignoring. Offending change: {}".format(change_doc))

                if change_doc.get('error', None):
                    if change_doc.get('error') == 'unauthorized':
                        logger.error(u"Unauthorized access to CouchDB. Make sure faraday-server's"\
                            " configuration file has CouchDB admin's credentials set")
                        thread.interrupt_main()

                    # TODO: A proper fix is needed here
                    elif change_doc.get('reason') == 'no_db_file':
                        self.__stream.stop()
                        break
Example #18
    def __iter__(self):
        while not self.__stop:
            try:
                # TODO: Connection timeout is too long.
                self.__response = requests.get(self.__url,
                                               params=self.__params,
                                               stream=True,
                                               auth=get_auth_info())

                for raw_line in self.__response.iter_lines():
                    if self.__stop:
                        break

                    line = self.__sanitize(raw_line)
                    if not line:
                        continue

                    change = self.__parse_change(line)
                    if not change:
                        continue

                    yield change

            except Exception as e:
                # On workspace deletion, requests will probably
                # fail to perform the request or the connection
                # will be closed. Check if this was intentional
                # by checking on the __stop flag.
                if self.__stop:
                    break

                import traceback
                logger.debug(traceback.format_exc())

                # Close everything but keep retrying
                self.stop()
                self.__stop = False

                logger.warning(
                    u"Lost connection to CouchDB. Retrying in 3 seconds...")
                time.sleep(3)
                logger.info(u"Retrying...")
Example #19
    def import_from_couchdb(self):
        total_amount = self.couchdb.get_total_amount_of_documents()
        processed_docs, progress = 0, 0
        should_flush_changes = False
        host_entities = {}

        def flush_changes():
            host_entities.clear()
            self.database.session.commit()
            self.database.session.expunge_all()

        for doc in self.couchdb.get_documents(per_request=1000):
            processed_docs = processed_docs + 1
            current_progress = (processed_docs * 100) / total_amount
            if current_progress > progress:
                self.__show_progress(
                    u'  * Importing {} from CouchDB'.format(self.__workspace),
                    progress)
                progress = current_progress
                should_flush_changes = True

            entity = server.models.FaradayEntity.parse(doc.get('doc'))
            if entity is not None:
                if isinstance(entity,
                              server.models.Host) and should_flush_changes:
                    flush_changes()
                    should_flush_changes = False

                try:
                    entity.add_relationships_from_dict(host_entities)
                except server.models.EntityNotFound as e:
                    logger.warning(
                        u"Ignoring {} entity ({}) because its parent wasn't found"
                        .format(entity.entity_metadata.document_type,
                                entity.entity_metadata.couchdb_id))
                else:
                    host_entities[doc.get('key')] = entity
                    self.database.session.add(entity)

        logger.info(u'{} importation done!'.format(self.__workspace))
        flush_changes()
Example #20
    def __iter__(self):
        while not self.__stop:
            try:
                # TODO: Connection timeout is too long.
                self.__response = requests.get(
                    self.__url, params=self.__params,
                    stream=True, auth=get_auth_info())

                for raw_line in self.__response.iter_lines():
                    if self.__stop:
                        break

                    line = self.__sanitize(raw_line)
                    if not line:
                        continue

                    change = self.__parse_change(line)
                    if not change:
                        continue

                    yield change

            except Exception as e:
                # On workspace deletion, requests will probably
                # fail to perform the request or the connection
                # will be closed. Check if this was intentional
                # by checking on the __stop flag.
                if self.__stop:
                    break

                import traceback
                logger.debug(traceback.format_exc())

                # Close everything but keep retrying
                self.stop()
                self.__stop = False

                logger.warning(u"Lost connection to CouchDB. Retrying in 3 seconds...")
                time.sleep(3)
                logger.info(u"Retrying...")
Example #21
    def import_from_couchdb(self):
        total_amount = self.couchdb.get_total_amount_of_documents()
        processed_docs, progress = 0, 0
        should_flush_changes = False
        host_entities = {}

        def flush_changes():
            host_entities.clear()
            self.database.session.commit()
            self.database.session.expunge_all()

        for doc in self.couchdb.get_documents(per_request=1000):
            processed_docs = processed_docs + 1
            current_progress = (processed_docs * 100) / total_amount 
            if current_progress > progress:
                self.__show_progress(u'  * Importing {} from CouchDB'.format(
                    self.__workspace), progress)
                progress = current_progress
                should_flush_changes = True

            entity = server.models.FaradayEntity.parse(doc.get('doc'))
            if entity is not None:
                if isinstance(entity, server.models.Host) and should_flush_changes:
                    flush_changes()
                    should_flush_changes = False

                try:
                    entity.add_relationships_from_dict(host_entities)
                except server.models.EntityNotFound as e:
                    logger.warning(u"Ignoring {} entity ({}) because its parent wasn't found".format(
                        entity.entity_metadata.document_type, entity.entity_metadata.couchdb_id))
                else:
                    host_entities[doc.get('key')] = entity
                    self.database.session.add(entity)

        logger.info(u'{} importation done!'.format(self.__workspace))
        flush_changes()
Example #22
 def onMessage(self, payload, is_binary):
     from server.web import app
     """
         We only support JOIN and LEAVE workspace messages.
         When authentication is implemented we need to verify
         that the user can join the selected workspace.
         When authentication is implemented we need to reply
         to the client if the join failed.
     """
     if not is_binary:
         message = json.loads(payload)
         if message['action'] == 'JOIN_WORKSPACE':
             if 'workspace' not in message or 'token' not in message:
                 logger.warning('Invalid join workspace message: '
                                '{}'.format(message))
                 self.sendClose()
                 return
             signer = itsdangerous.TimestampSigner(app.config['SECRET_KEY'],
                                                   salt="websocket")
             try:
                 workspace_id = signer.unsign(message['token'], max_age=60)
             except itsdangerous.BadData as e:
                 self.sendClose()
                 logger.warning('Invalid websocket token for workspace '
                                '{}'.format(message['workspace']))
                 logger.exception(e)
             else:
                 with app.app_context():
                     workspace = Workspace.query.get(int(workspace_id))
                 if workspace.name != message['workspace']:
                     logger.warning(
                         'Trying to join workspace {} with token of '
                         'workspace {}. Rejecting.'.format(
                             message['workspace'], workspace.name
                         ))
                     self.sendClose()
                 else:
                     self.factory.join_workspace(
                         self, message['workspace'])
         if message['action'] == 'LEAVE_WORKSPACE':
             self.factory.leave_workspace(self, message['workspace'])
Example #23
 def onMessage(self, payload, is_binary):
     from server.web import app
     """
         We only support JOIN and LEAVE workspace messages.
         When authentication is implemented we need to verify
         that the user can join the selected workspace.
         When authentication is implemented we need to reply
         to the client if the join failed.
     """
     if not is_binary:
         message = json.loads(payload)
         if message['action'] == 'JOIN_WORKSPACE':
             if 'workspace' not in message or 'token' not in message:
                 logger.warning('Invalid join workspace message: '
                                '{}'.format(message))
                 self.sendClose()
                 return
             signer = itsdangerous.TimestampSigner(app.config['SECRET_KEY'],
                                                   salt="websocket")
             try:
                 workspace_id = signer.unsign(message['token'], max_age=60)
             except itsdangerous.BadData as e:
                 self.sendClose()
                 logger.warning('Invalid websocket token for workspace '
                                '{}'.format(message['workspace']))
                 logger.exception(e)
             else:
                 with app.app_context():
                     workspace = Workspace.query.get(int(workspace_id))
                 if workspace.name != message['workspace']:
                     logger.warning(
                         'Trying to join workspace {} with token of '
                         'workspace {}. Rejecting.'.format(
                             message['workspace'], workspace.name))
                     self.sendClose()
                 else:
                     self.factory.join_workspace(self, message['workspace'])
         if message['action'] == 'LEAVE_WORKSPACE':
             self.factory.leave_workspace(self, message['workspace'])
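
The unsign() call above only succeeds if the client presents a token produced by a matching signer on the server side. A minimal sketch of that issuing side (the helper names and hard-coded secret are assumptions for illustration; only the itsdangerous calls mirror the handler above):

import itsdangerous

SECRET_KEY = 'change-me'  # must match app.config['SECRET_KEY']

def generate_websocket_token(workspace_id):
    """Sign the workspace id so the websocket handler can verify and expire it."""
    signer = itsdangerous.TimestampSigner(SECRET_KEY, salt="websocket")
    # sign() appends a timestamp and signature to the value; the handler's
    # unsign(..., max_age=60) call will reject the token after 60 seconds.
    return signer.sign(str(workspace_id))

def verify_websocket_token(token):
    """Counterpart of the handler's unsign() call, shown for symmetry."""
    signer = itsdangerous.TimestampSigner(SECRET_KEY, salt="websocket")
    try:
        return signer.unsign(token, max_age=60)
    except itsdangerous.BadData:
        return None

print(verify_websocket_token(generate_websocket_token(42)))  # b'42' while fresh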
Example #24
def _show_progress(msg, percentage):
    try:
        sys.stdout.write('{}: {}%\r'.format(msg, percentage))
        sys.stdout.flush()
    except IOError:
        logger.warning("Unable to write progress to stdout")
Example #25
    def judge(cls,
              language_config,
              src,
              max_cpu_time,
              max_memory,
              test_case_id=None,
              test_case=None,
              spj_version=None,
              spj_config=None,
              spj_compile_config=None,
              spj_src=None,
              output=False,
              io_mode=None):
        if not io_mode:
            io_mode = {"io_mode": ProblemIOMode.standard}  #标准I/O

        if not (test_case or test_case_id) or (test_case and test_case_id):
            raise JudgeClientError("invalid parameter")
        # init
        compile_config = language_config.get("compile")  #compile config info
        run_config = language_config["run"]  #run config info
        submission_id = uuid.uuid4().hex

        #print("############submission_id = ",submission_id)

        is_spj = spj_version and spj_config

        if is_spj:
            spj_exe_path = os.path.join(
                SPJ_EXE_DIR,
                spj_config["exe_name"].format(spj_version=spj_version))
            # spj src has not been compiled
            if not os.path.isfile(spj_exe_path):
                logger.warning(
                    "%s does not exist, spj src will be recompiled", spj_exe_path)
                cls.compile_spj(spj_version=spj_version,
                                src=spj_src,
                                spj_compile_config=spj_compile_config)

        print("!!!!!test_case =", test_case)
        init_test_case_dir = bool(test_case)
        with InitSubmissionEnv(JUDGER_WORKSPACE_BASE,
                               submission_id=str(submission_id),
                               init_test_case_dir=init_test_case_dir) as dirs:
            submission_dir, test_case_dir = dirs

            test_case_dir = test_case_dir or os.path.join(
                TEST_CASE_DIR, test_case_id)

            #print("@@@@@test_case_dir = ",test_case_dir)

            if compile_config:
                src_path = os.path.join(submission_dir,
                                        compile_config["src_name"])

                # write source code into file
                with open(src_path, "w",
                          encoding="utf-8") as f:  #创建Main.java文件,并把源代码src写入该文件
                    f.write(src)
                os.chown(src_path, COMPILER_USER_UID, 0)  #更改属主和属组
                os.chmod(src_path, 0o400)  #更改权限为只读

                # compile source code, return exe file path
                exe_path = Compiler().compile(compile_config=compile_config,
                                              src_path=src_path,
                                              output_dir=submission_dir)
                try:
                    # Java exe_path is SOME_PATH/Main, but the real path is SOME_PATH/Main.class
                    # We ignore it temporarily
                    os.chown(exe_path, RUN_USER_UID, 0)  # change owner and group of the exe
                    os.chmod(exe_path, 0o500)  # make the exe read/execute-only for its owner
                except Exception:
                    pass
            else:
                exe_path = os.path.join(
                    submission_dir, run_config["exe_name"])  #this is for spj
                with open(exe_path, "w", encoding="utf-8") as f:
                    f.write(src)

            if init_test_case_dir:  # test cases were passed inline, so write them to disk
                info = {
                    "test_case_number": len(test_case),
                    "spj": is_spj,
                    "test_cases": {}
                }
                # write test case
                for index, item in enumerate(test_case):
                    index += 1
                    item_info = {}

                    input_name = str(index) + ".in"
                    item_info["input_name"] = input_name
                    input_data = item["input"].encode("utf-8")
                    item_info["input_size"] = len(input_data)

                    with open(os.path.join(test_case_dir, input_name),
                              "wb") as f:
                        f.write(input_data)
                    if not is_spj:
                        output_name = str(index) + ".out"
                        item_info["output_name"] = output_name
                        output_data = item["output"].encode("utf-8")
                        item_info["output_md5"] = hashlib.md5(
                            output_data).hexdigest()
                        item_info["output_size"] = len(output_data)
                        item_info["stripped_output_md5"] = hashlib.md5(
                            output_data.rstrip()).hexdigest()

                        with open(os.path.join(test_case_dir, output_name),
                                  "wb") as f:
                            f.write(output_data)
                    info["test_cases"][index] = item_info
                with open(os.path.join(test_case_dir, "info"), "w") as f:
                    json.dump(info, f)

            judge_client = JudgeClient(run_config=language_config["run"],
                                       exe_path=exe_path,
                                       max_cpu_time=max_cpu_time,
                                       max_memory=max_memory,
                                       test_case_dir=test_case_dir,
                                       submission_dir=submission_dir,
                                       spj_version=spj_version,
                                       spj_config=spj_config,
                                       output=output,
                                       io_mode=io_mode)
            run_result = judge_client.run()

            return run_result
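
For reference, the "info" file written at the end of the test-case loop above has the following shape; the snippet below rebuilds it for a single hypothetical non-SPJ case ("1 2" in, "3" out) using exactly the fields the loop populates:

import hashlib
import json

input_data = b"1 2\n"
output_data = b"3\n"
info_example = {
    "test_case_number": 1,
    "spj": False,
    "test_cases": {
        1: {
            "input_name": "1.in",
            "input_size": len(input_data),
            "output_name": "1.out",
            "output_size": len(output_data),
            "output_md5": hashlib.md5(output_data).hexdigest(),
            "stripped_output_md5": hashlib.md5(output_data.rstrip()).hexdigest(),
        }
    },
}
print(json.dumps(info_example, indent=2))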
Example #26
 def get_document(self, doc_id):
     try:
         return self.__workspace.get(doc_id)
     except ResourceNotFound:
         logger.warning(u"Document {} was not found in CouchDB for Workspace {}".format(doc_id, self.__ws_name))
         return {}