Example #1
    def location_exists(self, given_location_id, try_cached):
        loc_exists = False
        loc_array = None

        utils.d_print(4, "(given_location_id): ", given_location_id)

        try:

            # first check if we can try the cached array
            if not (try_cached and self.cached_locations):
                # if try_cached is set to false OR if the cached_locations are empty, then refresh.
                self.cached_locations = self.api_outbound.get_locations(
                    self.org_id, [], [])

            loc_array = self.cached_locations

            utils.d_print(8, "loc_array: ", loc_array)
            for location in loc_array:
                if given_location_id == location['id']:
                    loc_exists = True
                    break

            utils.d_print(1, "loc_exists: ", loc_exists)

        except:
            utils.print_exception(1)

        return loc_exists
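Every snippet on this page leans on the project's utils module, in particular d_print (verbosity-gated debug output) and print_exception (log the exception currently being handled). Neither helper is shown here, and the signatures vary across the quoted projects (some pass a debug level, some a message, some the exception itself). The following is only a minimal sketch of what such helpers might look like; DEBUG_LEVEL is an assumed configuration value:

import sys
import traceback

DEBUG_LEVEL = 5  # assumed global verbosity threshold; higher levels are chattier

def d_print(level, *args):
    # Emit debug output only when the message's level is within the threshold.
    if level <= DEBUG_LEVEL:
        print(' '.join(str(a) for a in args))

def print_exception(tag):
    # Tag and log the exception currently being handled.
    print('EXCEPTION [{}]:'.format(tag), file=sys.stderr)
    traceback.print_exc()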
Example #2
    def __generate_epc_class(self, gtin, lot):
        try:
            gtin_str = str(gtin)
            utils.d_print(8, "GTINisDigit: ", gtin_str.isdigit())
            if gtin_str.isdigit():
                # this is a pure GTIN
                prefix_str = ''
                ret_str = 'urn:epc:class:lgtin:'
                for company_prefix in self.company_prefix_list:
                    prefix_str = str(company_prefix)
                    if prefix_str in gtin_str:
                        ret_str += prefix_str + '.'
                        break
                utils.d_print(8, "gtin_str: ", gtin_str)
                utils.d_print(8, "prefix_str: ", prefix_str)

                indicator, itemref = gtin_str[:-1].split(prefix_str)
                ret_str += indicator + itemref + '.'
                ret_str += lot
            else:
                # we have an IFT id
                prefix_itemref = gtin_str.split(':class:')[1]
                ret_str = 'urn:ibm:ift:product:lot:class:' + prefix_itemref + '.' + str(lot)
            utils.d_print(5, "GTIN, Lot, EPC Class: ", gtin, lot, ret_str)
            return ret_str
        except:
            utils.print_exception(1)        
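As a worked sketch of the pure-GTIN branch (using the GS1 documentation values, company prefix 0614141 and GTIN 10614141123458, which are not from this project): the check digit is dropped, the string is split on the matched company prefix, and the pieces are reassembled into an LGTIN URN.

# standalone illustration of the split performed above
gtin_str = '10614141123458'
prefix_str = '0614141'
indicator, itemref = gtin_str[:-1].split(prefix_str)  # drop check digit, split on prefix
print('urn:epc:class:lgtin:' + prefix_str + '.' + indicator + itemref + '.LOT42')
# -> urn:epc:class:lgtin:0614141.112345.LOT42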
Example #3
def get_unix_mgr(server, protocol):
    """Get secured manager for UNIX OS.
    'server' is the mail server name.
    'protocol' is the used POP3 connection protocol."""
    try :
        from gnomekeyringmgr import GnomeKeyringMgr
        return GnomeKeyringMgr('MailDispatcher', server, protocol)
    except ImportError as ex:
        utils.print_exception(ex, 
                              _('Secure storage manager initialization error'))
Example #4
    def __generate_biztxn_list(self, biz_txn_params):
        USE_GS1_TYPES = True  # for now, we will support only GS1 types
        try:
            utils.d_print(7, "biztxn params passed:", biz_txn_params)
            return_str = ""
            if biz_txn_params is not None and biz_txn_params != "":
                x = json.loads(biz_txn_params)
                embed_str = ""
                # map each supported key to its GS1 CBV business transaction type
                txn_types = {'po': 'po', 'da': 'desadv', 'prodorder': 'prodorder'}
                for biztxn in x:
                    for key, cbv_type in txn_types.items():
                        if key in biztxn:
                            if USE_GS1_TYPES:
                                embed_str += '<bizTransaction type="urn:epcglobal:cbv:btt:' + cbv_type + '">'
                                # FORMAT: urn:epcglobal:cbv:bt:1234567890123:T1234
                                embed_str += "urn:epcglobal:cbv:bt:" + self.headquarters_gln + ":" + biztxn[key]
                            else:
                                # urn:ibm:ift:bt:<Company Prefix>.<Location Reference>.<Transaction Id>
                                # Note that the headquarters GLN here (and in the config file) is not fully
                                # qualified, i.e., urn:ibm:ift:location:loc:21345589.HQ is represented as 21345589.HQ only.
                                embed_str += '<bizTransaction>'
                                embed_str += "urn:ibm:ift:bt:" + self.headquarters_gln + "." + biztxn[key]
                            embed_str += '</bizTransaction>'
                            break
                    else:
                        utils.d_print(2, "Unknown Biz Txn Type", biz_txn_params)

                if embed_str != "":
                    return_str = '<bizTransactionList>' + embed_str + '</bizTransactionList>'

            return return_str
        except:
            utils.print_exception(1)       
        return
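For illustration, a hypothetical input and its output (the PO number and GLN below are made up): biz_txn_params = '[{"po": "T1234"}]' with self.headquarters_gln set to '1234567890123' would produce

<bizTransactionList><bizTransaction type="urn:epcglobal:cbv:btt:po">urn:epcglobal:cbv:bt:1234567890123:T1234</bizTransaction></bizTransactionList>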
Example #5
def _cochlear_trim_sai_marginals(filename_and_indexes):
    try:
        filename, norm_segstart, norm_segend, audio_id, NAP_detail = filename_and_indexes
        sai_video_filename = '{}_sai_video_{}'.format(filename, NAP_detail)
        if os.path.isfile('{}.npy'.format(sai_video_filename)):
            return sai_video_filename

        if NAP_detail == 'high':
            try:
                NAP = utils.csv_to_array(filename + 'cochlear' + NAP_detail)
            except:
                NAP = brain.cochlear(filename,
                                     stride=1,
                                     rate=44100,
                                     apply_filter=0,
                                     suffix='cochlear' + NAP_detail)
        if NAP_detail == 'low':
            try:
                NAP = utils.csv_to_array(filename + 'cochlear' + NAP_detail)
            except:
                NAP = brain.cochlear(
                    filename,
                    stride=IO.NAP_STRIDE,
                    rate=IO.NAP_RATE,
                    apply_filter=0,
                    suffix='cochlear' + NAP_detail
                )  # Seems to work best, in particular when they are all the same.

        num_channels = NAP.shape[1]
        input_segment_width = 2048
        sai_params = CreateSAIParams(num_channels=num_channels,
                                     input_segment_width=input_segment_width,
                                     trigger_window_width=input_segment_width,
                                     sai_width=1024)

        sai = pysai.SAI(sai_params)

        NAP = utils.trim_right(
            NAP[int(np.rint(NAP.shape[0] * norm_segstart)):int(np.rint(NAP.shape[0] * norm_segend))],
            threshold=.05)  # builtin int() here: np.int was removed in NumPy 1.24
        sai_video = [
            np.copy(sai.RunSegment(input_segment.T))
            for input_segment in utils.chunks(NAP, input_segment_width)
        ]
        del NAP
        np.save(sai_video_filename,
                np.array([sai_rectangles(frame) for frame in sai_video]))
        return sai_video_filename

    except:
        print(utils.print_exception(
            'Calculation of SAI video failed for file {}, NAP detail {}'.format(
                filename, NAP_detail)))
        return False
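The single-tuple argument makes the function convenient to fan out with a multiprocessing pool; a hypothetical invocation (file name, segment bounds, and audio id are made up, and the project's pysai/brain modules must be importable):

# from multiprocessing import Pool
# jobs = [('memory/utterance_01.wav', 0.0, 0.5, 42, 'low')]
# with Pool() as pool:
#     sai_files = pool.map(_cochlear_trim_sai_marginals, jobs)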
Example #6
    def push_to_bts(self):
        company_prefixes = []
        inbound_api = None

        try:
            # 1. load the workbook
            wb = load_workbook(filename = self.filename)

            # 2. set up the inbound API
            org = organization.Organization("Name", utils.global_args.client)
            org_id = org.get_id()
            inbound_api = api_inbound.APIInbound(5, org_id, utils.global_args.env, utils.global_args.header_entitled_org)

            # get company prefixes, head quarters GLN, etc.
            org_params = org.get_properties()
            # if we are asked to use a different HQ GLN, use it.
            if utils.global_args.hq_gln:
                org_params['hq_gln'] = utils.global_args.hq_gln

            # 3. start parsing the excel sheet
            #     a. first gather global params
            sheet_names = wb.sheetnames
            utils.d_print(2, "Sheet Names in the Excel Sheet: ", sheet_names)


            # if arguments indicate specific rows to be pushed, push the same.
            if utils.global_args.specific_rows:
                self.specific_rows = utils.global_args.specific_rows.split(',')
            else:
                self.specific_rows = None

            if 'if' in utils.global_args.isheets:
                # b. next push if there are any facilities
                facilities = wb['Facilities']
                self.parse_and_push_worksheet(org_params, facilities, 'Facility', inbound_api)

            if 'ip' in utils.global_args.isheets:
                # c. next push any products   
                products = wb['Products']
                self.parse_and_push_worksheet(org_params, products, 'Product',inbound_api)

            if 'ie' in utils.global_args.isheets:
                # d. next push any events
                events = wb['Events']
                self.parse_and_push_worksheet(org_params, events, 'Event',inbound_api)

            if 'ipl' in utils.global_args.isheets:
                # e. next push any events
                payloads = wb['Payloads']
                self.parse_and_push_worksheet(org_params, payloads, 'Payload',inbound_api)
        except:
            utils.print_exception(1)
            return
Example #7
 def __generate_time_in_iso_format(self, given_time):
     try:
         # convert 'YYYY-MM-DD:HH:MM' (e.g. '2020-01-15:23:10') to the ISO form '2020-01-15T23:10:00.000Z'
         x = given_time.split(':')
         if len(x) != 3:
             utils.d_print(0, "Input Time: ", given_time)
             utils.quit("Time not in required format!  Format: YYYY-MM-DD:HH:MM")
         iso_time = x[0] + 'T' + x[1] + ':' + x[2] + ':00.000Z'
         return iso_time
     except:
         utils.print_exception(1)
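For example, '2020-01-15:23:10' splits on ':' into ['2020-01-15', '23', '10'] and comes back as '2020-01-15T23:10:00.000Z'. An equivalent standalone sketch that also validates the input, using only the standard library:

from datetime import datetime

def to_iso(given_time):
    # Parse 'YYYY-MM-DD:HH:MM' and emit the ISO-8601 form used above.
    t = datetime.strptime(given_time, '%Y-%m-%d:%H:%M')
    return t.strftime('%Y-%m-%dT%H:%M:00.000Z')

print(to_iso('2020-01-15:23:10'))  # 2020-01-15T23:10:00.000Z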
Example #8
 def __generate_bizloc(self, gln):
     try:
         utils.d_print(7, "bizloc gln passed:", gln)
         return_str = '<bizLocation><id>'
         if "urn:ibm:ift:location" not in str(gln):
             return_str += self.__gln_2_epcgln(gln)
         else:
             return_str += gln
         return_str += '</id></bizLocation>'
         return return_str
     except:
         utils.print_exception(1)            
Example #9
 def __generate_bizstep(self, bizstep):
     try:
         utils.d_print(7, "bizstep value passed:", bizstep)
         return_str = '<bizStep>'
         if "http:" not in bizstep:
             return_str += 'urn:epcglobal:cbv:bizstep:' + bizstep
         else:
             return_str += bizstep
         return_str += '</bizStep>'
         return return_str
     except:
         utils.print_exception(1)            
Example #10
 def __generate_epc_serial(self, gtin_str, serial):
     try:
         return_str = ''
         if 'urn:ibm:ift:' in gtin_str:
             # we have an IFT id
             prefix_itemref = gtin_str.split(':class:')[1]
             return_str = 'urn:ibm:ift:product:serial:obj:' + prefix_itemref + '.' + str(serial)
         else:
             utils.quit("GS1 Serials not yet implemented.")
         return return_str
     except:
         utils.print_exception(1)
Example #11
def load_app_settings():
    """ Load setting from configuration file. """
    app_path = get_app_path()
    if not os.access(app_path, os.F_OK):
        return (None, None, None)
    pkl_file = open(app_path, 'rb')
    try :
        server_info = pickle.load(pkl_file)
        deletion_filters = pickle.load(pkl_file)
        storing_filters = pickle.load(pkl_file)
    except (EOFError, IndexError) as ex:
        utils.print_exception(ex, _('Settings loading error'))
        return (None, None, None)
    return (server_info, deletion_filters, storing_filters)
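A matching save-side counterpart is implied but not shown; a minimal sketch (the function name is hypothetical, reusing the module's pickle import and get_app_path()), pickling the same three objects in the order load_app_settings() reads them:

def save_app_settings(server_info, deletion_filters, storing_filters):
    """Persist settings in the order load_app_settings() expects."""
    with open(get_app_path(), 'wb') as pkl_file:
        pickle.dump(server_info, pkl_file)
        pickle.dump(deletion_filters, pkl_file)
        pickle.dump(storing_filters, pkl_file)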
Example #12
    def __generate_agg_xml(self):
        try:

            self.xml_string += "<AggregationEvent>"
            self.__add_attribute('eventTime', self.__generate_time_in_iso_format(self.event_params[self.EV_TIME]))
            self.__add_attribute('eventTimeZoneOffset', self.EV_DEFAULT_TIME_ZONE)

            self.xml_string += "<baseExtension>"
            self.__add_attribute('eventID', self.event_params[self.EV_ID])
            self.xml_string += "</baseExtension>"

            self.xml_string += self.__generate_bizloc(self.event_params[self.EV_BIZLOC])

            # output is a pallet id etc., the parent
            # for now, only Logistic Units are supported as parents; in fact, only one logistic unit.

            x = json.loads(self.event_params[self.EV_OUTPUT])
            lu_details = x[0]
            self.__add_attribute('parentID',  self.__generate_lu(lu_details))

            if self.event_params[self.EV_INPUT_TYPE] == 'E':
                self.__generate_epc_list('childEPCs', self.event_params[self.EV_INPUT])
            elif self.event_params[self.EV_INPUT_TYPE] == 'Q':
                self.__add_attribute('childEPCs', '')
            else:
                utils.quit('Unknown Input Type ' + self.event_params[self.EV_INPUT_TYPE])

            self.__add_attribute('action', 'ADD')
            self.xml_string += self.__generate_bizstep(self.event_params[self.EV_BIZSTEP])

            self.xml_string += '<extension>'

            # generate the inputs, the children
            if self.event_params[self.EV_INPUT_TYPE] == 'Q':
                self.__generate_quantity_list('childQuantityList', self.event_params[self.EV_INPUT])

            self.xml_string += self.__generate_src_dest('source', self.event_params[self.EV_SRC])
            self.xml_string += self.__generate_src_dest('destination', self.event_params[self.EV_DEST])

            self.xml_string += '</extension>'

            # add biz txn documents here
            self.xml_string += self.__generate_biztxn_list(self.event_params[self.EV_BIZ_TXN_TYPE_LIST])

            self.xml_string += '</AggregationEvent>'

            utils.d_print(2, "## Aggregation: ", self.xml_string)        
        except:
            utils.print_exception(1)
        return
Example #13
    def __gln_2_epcgln(self, gln):
        try:
            return_str = "urn:epc:id:sgln:"

            matched_prefix = None
            for company_prefix in self.company_prefix_list:
                if str(company_prefix) in str(gln):
                    matched_prefix = str(company_prefix)
                    return_str += matched_prefix + '.'
                    break

            # the location reference is whatever follows the matched prefix, minus the check digit;
            # an unmatched GLN now fails into the except handler instead of silently reusing the loop variable
            location_ref = str(gln).split(matched_prefix)[1]
            return_str += location_ref[:-1]
            return_str += '.0'
            return return_str
        except:
            utils.print_exception(1)            
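Worked sketch with the GS1 documentation values (company prefix 0614141, GLN 0614141123452, not from this project): everything after the prefix minus the trailing check digit becomes the location reference, and '.0' is appended as the extension.

gln = '0614141123452'
prefix = '0614141'
location_ref = gln.split(prefix)[1]        # '123452'
print('urn:epc:id:sgln:' + prefix + '.' + location_ref[:-1] + '.0')
# -> urn:epc:id:sgln:0614141.12345.0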
Example #14
 def __generate_lu(self, lu_details):
     try:
         utils.d_print(7, "LU Gen", lu_details)
         return_str = '' 
         if 'lu' in lu_details:
             lud = lu_details['lu']
             if 'urn:ibm:ift:' in lud:
                 return_str = lud
             else:
                 utils.quit("GS1 LUs not yet implemented.")
         else:
             utils.d_print(7, "LU Generation: ", lu_details)
             utils.quit("'lu' key not present.")
         return return_str
     except:
         utils.print_exception(1)
Example #15
    def __generate_xfm_xml(self):
        try:
            self.xml_string += "<extension> <TransformationEvent>"
            self.__add_attribute('eventTime', self.__generate_time_in_iso_format(self.event_params[self.EV_TIME]))
            self.__add_attribute('eventTimeZoneOffset', self.EV_DEFAULT_TIME_ZONE)

            self.xml_string += "<baseExtension>"
            self.__add_attribute('eventID', self.event_params[self.EV_ID])
            self.xml_string += "</baseExtension>"

            self.xml_string += self.__generate_bizloc(self.event_params[self.EV_BIZLOC])
            self.xml_string += self.__generate_bizstep(self.event_params[self.EV_BIZSTEP])

            # input for transformation
            if self.event_params[self.EV_INPUT_TYPE] == 'Q':
                self.__generate_quantity_list('inputQuantityList', self.event_params[self.EV_INPUT])
            elif self.event_params[self.EV_INPUT_TYPE] == 'E':
                self.__generate_epc_list('inputEPCList', self.event_params[self.EV_INPUT])
            else:
                utils.quit('Unknown Input Type ' + self.event_params[self.EV_INPUT_TYPE])

            # output for transformation
            if self.event_params[self.EV_OUTPUT_TYPE] == 'Q':
                self.__generate_quantity_list('outputQuantityList', self.event_params[self.EV_OUTPUT])
            elif self.event_params[self.EV_OUTPUT_TYPE] == 'E':
                self.__generate_epc_list('outputEPCList', self.event_params[self.EV_OUTPUT])
            else:
                utils.quit('Unknown Output Type ' + self.event_params[self.EV_OUTPUT_TYPE])


            self.xml_string += "<extension>"
            self.xml_string += self.__generate_src_dest('source', self.event_params[self.EV_SRC])
            self.xml_string += self.__generate_src_dest('destination', self.event_params[self.EV_DEST])
            self.xml_string += "</extension>"

            # add biz txn documents here
            self.xml_string += self.__generate_biztxn_list(self.event_params[self.EV_BIZ_TXN_TYPE_LIST])

            self.xml_string += "</TransformationEvent></extension> "

            utils.d_print(2, "## Transformation: ", self.xml_string)        
        except:
            utils.print_exception(1)
        return
Example #16
    def __generate_obs_xml(self):
        try:
            self.xml_string += "<ObjectEvent>"
            self.__add_attribute('eventTime', self.__generate_time_in_iso_format(self.event_params[self.EV_TIME]))
            self.__add_attribute('eventTimeZoneOffset', self.EV_DEFAULT_TIME_ZONE)

            self.xml_string += "<baseExtension>"
            self.__add_attribute('eventID', self.event_params[self.EV_ID])
            self.xml_string += "</baseExtension>"

            self.xml_string += self.__generate_bizloc(self.event_params[self.EV_BIZLOC])
            self.xml_string += self.__generate_bizstep(self.event_params[self.EV_BIZSTEP])

            self.__add_attribute('action', 'OBSERVE')

            if self.event_params[self.EV_INPUT_TYPE] == 'E':
                self.__generate_epc_list('epcList', self.event_params[self.EV_INPUT])
            elif self.event_params[self.EV_INPUT_TYPE] == 'Q':
                self.__add_attribute('epcList', '')
            else:
                utils.quit('Unknown Input Type ' + self.event_params[self.EV_INPUT_TYPE])

            self.xml_string += '<extension>'

            # input for observation
            if self.event_params[self.EV_INPUT_TYPE] == 'Q':
                self.__generate_quantity_list('quantityList', self.event_params[self.EV_INPUT])
            elif self.event_params[self.EV_INPUT_TYPE] != 'E':
                utils.quit('Unknown Input Type ' + self.event_params[self.EV_INPUT_TYPE])
            
            self.xml_string += self.__generate_src_dest('source', self.event_params[self.EV_SRC])
            self.xml_string += self.__generate_src_dest('destination', self.event_params[self.EV_DEST])

            self.xml_string += '</extension>'

            # add biz txn documents here
            self.xml_string += self.__generate_biztxn_list(self.event_params[self.EV_BIZ_TXN_TYPE_LIST])

            self.xml_string += '</ObjectEvent>'

            utils.d_print(2, "## Observation: ", self.xml_string)        
        except:
            utils.print_exception(1)
        return
Example #17
 def __generate_quantity_list(self, qty_list_name, qty_list):
     try:
         utils.d_print(7, "Gen Qty list: ", qty_list)
         self.xml_string += '<' + qty_list_name + '>'
         x = json.loads(qty_list)
         utils.d_print(8, "Qty List JSON String: ", x)
         for x_item in x:
             self.xml_string += '<quantityElement>'
             utils.d_print(7, "x_item[GTIN]: ", x_item['gtin'])
             utils.d_print(7, "x_item[LOT   ]: ", x_item['lot'])
             self.__add_attribute('epcClass', self.__generate_epc_class(x_item['gtin'], x_item['lot']))
             self.__add_attribute('quantity', x_item['qty'])
             self.__add_attribute('uom', x_item['uom'])
             self.xml_string += '</quantityElement>'
             utils.d_print(7, "XML_String:", self.xml_string)
         self.xml_string += '</' + qty_list_name + '>'
     except:
         utils.print_exception(1)
     return
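A hypothetical qty_list value showing the JSON shape the loop expects, one object per quantity element with the keys gtin, lot, qty, and uom (values made up):

# qty_list = '[{"gtin": 10614141123458, "lot": "400", "qty": 200, "uom": "KGM"}]'
# yields one <quantityElement> inside the named list, with epcClass built by
# __generate_epc_class from the gtin/lot pair, plus the quantity and uom attributes.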
Example #18
    def __generate_src_dest(self, loc_type, gln):
        try:
            return_str = ''
                    
            if gln is not None:
                return_str += '<' + loc_type + 'List>'
                return_str += '<' + loc_type + ' type="urn:epcglobal:cbv:sdt:owning_party">'

                if "urn:ibm:ift:location" in str(gln):
                    return_str += gln
                else:
                    return_str += self.__gln_2_epcgln(gln)
                
                return_str += '</' + loc_type + '>'
                return_str += '</' + loc_type + 'List>'

            return return_str
        except:
            utils.print_exception(1)
Example #19
    def __generate_com_xml(self):
        try:
            self.xml_string += "<ObjectEvent>"

            # convert time to required string and add
            self.__add_attribute('eventTime', self.__generate_time_in_iso_format(self.event_params[self.EV_TIME]))
            self.__add_attribute('eventTimeZoneOffset', self.EV_DEFAULT_TIME_ZONE)


            self.xml_string += "<baseExtension>"
            self.__add_attribute('eventID', self.event_params[self.EV_ID])
            self.xml_string += "</baseExtension>"

            self.__add_attribute('action', 'ADD')

            self.xml_string += self.__generate_bizstep(self.event_params[self.EV_BIZSTEP])
            self.xml_string += self.__generate_bizloc(self.event_params[self.EV_BIZLOC])

            if self.event_params[self.EV_OUTPUT_TYPE] == 'Q':
                self.__add_attribute('epcList', '')
            else:
                utils.quit("Add EPCs in Commission event to handle type 'E'")

            self.xml_string += "<extension>"

            if self.event_params[self.EV_OUTPUT_TYPE] == 'Q':
                self.__generate_quantity_list('quantityList', self.event_params[self.EV_OUTPUT])

            # add source and destination lists here
            self.xml_string += self.__generate_src_dest('source', self.event_params[self.EV_SRC])
            self.xml_string += self.__generate_src_dest('destination', self.event_params[self.EV_DEST])
            self.xml_string += "</extension>"

            # add biz txn documents here
            self.xml_string += self.__generate_biztxn_list(self.event_params[self.EV_BIZ_TXN_TYPE_LIST])

            self.xml_string += "</ObjectEvent>"

            utils.d_print(2, "## Commission: ", self.xml_string)
        except:
            utils.print_exception(1)
        return    
Example #20
def inc_nb_votes():
    """ Increment the number of votes of a couple question/answer.
    """
    try:
        args = utils.fieldsToValuesPOST(['question_id', 'answer_id'], request)
        arg_question_id = args['question_id']
        arg_answer_id = args['answer_id']
        answer_binding = data.AnswersBinding.query.filter_by(question_id=arg_question_id,
                                                             answer_id=arg_answer_id).first()
        if answer_binding:
            question = data.Questions.query.filter_by(id=arg_question_id).first()
            if question:
                now = datetime.now()
                if not (question.datetime_start < now and question.datetime_expiry > now):
                    return json.dumps({"success": False, "message": "Question expired"})
        answer_binding.nb_votes += 1
        db.session.commit()
        return json.dumps({"success": True, "message": answer_binding.nb_votes})
    except Exception as e:
        utils.print_exception(e)
        return json.dumps({"success": False, "message": str(e)})
Example #21
def create_question():
    try:
        args = utils.fieldsToValuesPOST(['question', 'answers',
                                         'datetime_start', 'datetime_expiry'], request)
        arg_question = args['question']
        arg_answers = args['answers']
        arg_datetime_start = args['datetime_start']
        arg_datetime_expiry = args['datetime_expiry']

        arg_answers = arg_answers.split(';')

        does_question_exist = data.Questions.query.filter_by(content=arg_question).first()
        if not does_question_exist:
            question = data.Questions(arg_question, arg_datetime_start, arg_datetime_expiry)
            db.session.add(question)
            db.session.commit()
            question_id = question.id
        else:
            question_id = does_question_exist.id

        for answer in arg_answers:
            does_answer_exist = data.Answers.query.filter_by(content=answer).first()
            if not does_answer_exist:
                ans = data.Answers(answer)
                db.session.add(ans)
                db.session.commit()
                answer_id = ans.id
            else:
                answer_id = does_answer_exist.id

            does_answer_binding_exist = data.AnswersBinding.query.filter_by(question_id=question_id,
                                                                            answer_id=answer_id).first()
            if not does_answer_binding_exist:
                db.session.add(data.AnswersBinding(question_id, answer_id))
                db.session.commit()
        db.session.commit()
        return json.dumps({"success": True, "message": "Question and answers correctly created."})
    except Exception as e:
        utils.print_exception(e)
        return json.dumps({"success": False, "message": str(e)})
Example #23
 def __generate_epc_list(self, epc_name, epc_serial_params):
     try:
         utils.d_print(7, "epc values:", epc_name, epc_serial_params)
         x = json.loads(epc_serial_params)
         return_str = '<' + epc_name + '>'
         for product_details in x:
             utils.d_print(8, "product_details: ", product_details)
             if 'gtin' in product_details:
                 gtin_str = product_details['gtin']
                 for serial_no in product_details['serial']:
                     return_str += '<epc>'
                     return_str +=  self.__generate_epc_serial(gtin_str, serial_no)
                     return_str += '</epc>'
             elif 'lu' in product_details:
                 return_str += '<epc>'
                 return_str += self.__generate_lu(product_details)
                 return_str += '</epc>'
             else:
                 utils.quit("Unknown key name (not gtin or lu) in EPC details")
         return_str += '</' + epc_name + '>'
         self.xml_string += return_str
         utils.d_print(7, "return_str: ", return_str)
     except:
         utils.print_exception(1)            
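A hypothetical epc_serial_params value (identifiers made up): each entry carries either a 'gtin' with a list of serial numbers, expanded through __generate_epc_serial, or an 'lu' logistic-unit id, passed through __generate_lu.

# epc_serial_params = '''[
#     {"gtin": "urn:ibm:ift:product:class:1234567890.prod-1", "serial": ["S1", "S2"]},
#     {"lu": "urn:ibm:ift:lpn:obj:1234567890.PALLET-7"}
# ]'''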
Example #24
    def product_exists(self, check_type, check_value, try_cached):
        product_exists = False
        product_array = None

        utils.d_print(4, "(check_type, check_value): ", check_type,
                      check_value)

        try:

            # first check if we can try the cached array
            if not (try_cached and self.cached_products):
                # if try_cached is set to false OR if the cached_products are empty, then refresh with all products
                self.cached_products = self.api_outbound.get_products(
                    self.org_id, None, None, None)

            product_array = self.cached_products

            for product in product_array:
                #                if not isinstance(product, dict):
                #                    continue
                if check_type == 'id':
                    if product['id'] == check_value:
                        product_exists = True
                elif check_type == 'description':
                    if product['description'] == check_value:
                        product_exists = True
                else:
                    utils.d_print(1, "Uknown Checktype", check_type, " Val: ",
                                  check_value)

            utils.d_print(1, "product_exists: ", product_exists)

        except:
            utils.print_exception(1)

        return product_exists
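Typical call pattern (argument values hypothetical): pass try_cached=True on repeat lookups so the outbound API is only hit when the cache is empty.

# if not org.product_exists('id', 'urn:ibm:ift:product:class:1234567890.prod-1', True):
#     print('product not registered yet')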
Example #25
def get_all_questions_answers():
    """ Get all running questions and answers.
    A valid question is a question whose date is superior to the current date,
    and did not expire yet.
    """
    try:
        current_date = datetime.now()

        questions = data.Questions.query.all()
        # I do not use a where() clause because I did not find how to call it with flask-sqlalchemy...
        questions = [question for question in questions
                     if question.datetime_start < current_date
                     and question.datetime_expiry > current_date]

        ret = []
        for question in questions:
            bindings = data.AnswersBinding.query.filter_by(question_id=question.id)
            json_answers = []
            for binding in bindings:
                answer = data.Answers.query.filter_by(id=binding.answer_id).first()
                answer_obj = {}
                answer_obj["content"] = answer.content
                answer_obj["answer_id"] = answer.id
                json_answers.append(answer_obj)
            question_obj = {}
            question_obj["datetime_start"] = str(question.datetime_start)
            question_obj["datetime_expiry"] = str(question.datetime_expiry)
            question_obj["answers"] = json_answers
            question_obj["content"] = question.content
            question_obj["question_id"] = question.id
            ret.append(question_obj)
        json_ret = json.dumps(ret)
        return json.dumps({"success": True, "message": json_ret})
    except Exception as e:
        utils.print_exception(e)
        return json.dumps({"success": False, "message": str(e)})
Example #26
    def parse_and_push_worksheet(self, org_params, b_rows, data_type, inbound_api):

        # criteria that determines if a row is to be pushed
        def row_to_be_skipped(row_no):
            utils.d_print(6, "Row: ", row_no, "specific_rows:", self.specific_rows)
            ret_val = False
            # skip the first row; it holds the column names; data starts at row_no 1.
            if row_no == 0:
                ret_val = True
            elif self.specific_rows:  # we check if this is not None
                if str(row_no) not in self.specific_rows:
                    ret_val = True
            utils.d_print(6, "Row: ", row_no, "Skip?:", ret_val)
            return ret_val

        try:
            b_row = {}            
            row_no = 0
            for row in b_rows.iter_rows(values_only=True):
                # if this row is not to be pushed based on certain criteria, increment and continue
                if row_to_be_skipped(row_no):
                    row_no += 1 
                    continue

                # this is a row to be pushed.
                index = 0
                ## we check for the first column.
                if row[0] is not None:
                    utils.d_print (2, "-------> ", row)
                    for cell_value in row:
                        # if cell_value is not None: 
                        # the value of index should match the values of F_PARAMS in datagen
                        # for Strings, we have a flag to control if we want to encode as unicode bytes
                        unicode_encode = False
                        if unicode_encode and isinstance(cell_value, str):
                            b_row[index] = cell_value.encode('utf-8')
                        else:
                            b_row[index] = cell_value
                        index += 1
                    utils.d_pprint(3, "Business Format Row: ", b_row)

                    # we now have read the "business row"
                    # this is where we convert the row we just read using the adapter to the row
                    # that we want.
                    if data_type == 'Facility':
                        d_row = self.adapter.convert_facility_row(b_row)
                        self._check_row_length(row_no+1, len(d_row), datagen.N_FACILITY_PARAMS)
                        x = datagen.Facility(org_params, d_row)
                    elif data_type == 'Product':
                        d_row = self.adapter.convert_product_row(b_row)
                        self._check_row_length(row_no+1, len(d_row), datagen.N_PRODUCT_PARAMS)
                        x = datagen.Product(org_params, d_row)
                    elif data_type == 'Event':
                        d_row = self.adapter.convert_event_row(b_row)
                        self._check_row_length(row_no+1, len(d_row), datagen.N_EVENT_PARAMS)
                        x = datagen.Event(org_params, d_row)
                    elif data_type == 'Payload':
                        d_row = self.adapter.convert_payload_row(b_row)
                        self._check_row_length(row_no+1, len(d_row), datagen.N_PAYLOAD_PARAMS)
                        x = datagen.Payload(org_params, d_row)
                    else:
                        utils.quit('Unknown Data Type!')

                    # retrieve the XML and push it.
                    xml = x.get_xml()
                    utils.d_print(5, "xml: ", xml)
                    inbound_api.push_xml(xml)

                row_no += 1

            print((row_no - 1), data_type, "elements pushed.")

            # there could be additional payloads left in the adapter
            # this is the case where there is no explicit payload tab.
            if self.adapter.get_built_count('Payloads') > 0:
                a_payloads = self.adapter.get_built_data('Payloads')
                payload_index = 0
                for p_payload in a_payloads:
                    self._check_row_length(payload_index+1, len(p_payload), datagen.N_PAYLOAD_PARAMS)
                    x = datagen.Payload(org_params, p_payload)
                    xml = x.get_xml()
                    utils.d_print(5, "xml: ", xml)
                    if not utils.global_args.simulate:
                        inbound_api.push_xml(xml)
        except:
            utils.print_exception(1)
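The _check_row_length helper is referenced above but not shown on this page; a plausible minimal sketch, assuming it simply aborts on a column-count mismatch:

    def _check_row_length(self, row_no, actual, expected):
        # Abort with a readable message when a converted row has the wrong number of columns.
        if actual != expected:
            utils.quit('Row {}: expected {} columns, got {}'.format(row_no, expected, actual))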
Example #27
class Controller:
    def __init__(self, init_state, host):
        self.state = init_state
        
        context = zmq.Context()
        
        self.publisher = context.socket(zmq.PUB)
        self.publisher.bind('tcp://*:{}'.format(IO.STATE))
        
        self.event = context.socket(zmq.PUB)
        self.event.bind('tcp://*:{}'.format(IO.EVENT))

        snapshot = context.socket(zmq.ROUTER)
        snapshot.bind('tcp://*:{}'.format(IO.SNAPSHOT))

        self.association = context.socket(zmq.REQ)
        self.association.connect('tcp://{}:{}'.format(host, IO.ASSOCIATION))

        incoming = context.socket(zmq.PULL)
        incoming.bind('tcp://*:{}'.format(IO.EXTERNAL))

        poller = zmq.Poller()
        poller.register(incoming, zmq.POLLIN)
        poller.register(snapshot, zmq.POLLIN)

        while True:
            events = dict(poller.poll())
            
            if incoming in events:
                self.parse(incoming.recv_json())
                 
            if snapshot in events:
                address, _, message = snapshot.recv_multipart()
                snapshot.send_multipart([ address,
                                          b'',
                                          dumps(self.state).encode() ])  # assumes json.dumps; encode to bytes for zmq

    def parse(self, message):
        print('[self.] received: {}'.format(message))

        black_list = []

        try:
            # if 'learnwav' in message or 'respondwav_single' in message or 'respondwav_sentence' in message:
            #     _, filename = message.split()
            #     if filename in black_list:
            #         print 'SKIPPING BAD FILE {}'.format(filename)
            #         return

            if message == 'dream':
                self.state['memoryRecording'] = False
                self.state['autorespond_sentence'] = False
                self.state['ambientSound'] = False
                self.state['autolearn'] = False
                self.state['autorespond_single'] = False
                self.state['_audioLearningStatus'] = False
                self.state['record'] = False
                self.publisher.send_json(self.state)

                self.event.send_json({'dream': True})

            if message == 'reboot':
                utils.reboot()

            if message == 'appendCurrentSettings':
                self.association.send_pyobj([message])
                self.association.recv_pyobj()
            
            if 'getCurrentSettings' in message:
                msg, value = message.split()
                self.association.send_pyobj([msg, value])
                self.association.recv_pyobj()

            if 'i_am_speaking' in message:
                _, value = message.split()
                self.state['i_am_speaking'] = value in ['True', '1']

            if 'enable_say_something' in message:
                _, value = message.split()
                self.state['enable_say_something'] = value in ['True', '1']
            
            if 'last_segment_ids' in message:
                the_ids = message[17:]
                self.event.send_json({'last_segment_ids': loads(the_ids) })
                
            if 'last_most_significant_audio_id' in message:
                audio_id = message[31:]
                self.event.send_json({'last_most_significant_audio_id': audio_id })
            
            if message == 'clear play_events':
                self.event.send_json({'clear play_events' : 'True'})

            if 'calculate_cochlear' in message:
                _, wav_file = message.split()
                t0 = time.time()
                try:
                    brain.cochlear(utils.wait_for_wav(wav_file), stride=IO.NAP_STRIDE, rate=IO.NAP_RATE)
                except:
                    print('SHOULD {} BE BLACKLISTED?'.format(wav_file))
                    black_list.append(wav_file)
                print('Calculating cochlear neural activation patterns took {} seconds'.format(time.time() - t0))
            
            if message == 'evolve':
                self.state['memoryRecording'] = False
                self.state['autorespond_sentence'] = False
                self.state['autolearn'] = False
                self.state['autorespond_single'] = False
                self.state['_audioLearningStatus'] = False
                self.state['record'] = False
                self.publisher.send_json(self.state)
                
                self.association.send_pyobj(['evolve'])
                self.association.recv_pyobj()

            if 'register' in message and 'BRAIN' in message:
                _, name, free = message.split()
                self.state['brains'][name] = int(free)

            if 'fullscreen' in message:
                _, value = message.split()
                self.event.send_json({ 'fullscreen': value in ['True', '1'] })

            if 'display2' in message:
                _, value = message.split()
                self.event.send_json({ 'display2': value in ['True', '1'] })

            if message == 'startrec':
                self.state['record'] = True

            if message == 'stoprec':
                self.state['record'] = False

            if 'facerecognition' in message:
                _, value = message.split()
                self.state['facerecognition'] = value in ['True', '1']

            if 'print_me' in message:
                self.event.send_json({ 'print_me': message[7:] })

            if 'play_id' in message:
                self.event.send_json({ 'play_id': message[8:] })

            if 'testSentence' in message:
                self.event.send_json({ 'testSentence': message[13:] })

            if 'assoc_setPlotting' in message:
                self.event.send_json({ 'assoc_setPlotting': message[18:] in ['True', '1'] })

            if 'assoc_setParam' in message:
                self.event.send_json({ 'assoc_setParam': message[15:] })

            if 'respond_setParam' in message:
                self.event.send_json({ 'respond_setParam': message[17:] })

            if 'memoryRecording' in message:
                self.state['memoryRecording'] = message[16:] in ['True', '1']
               
            if '_audioLearningStatus' in message:
                self.state['_audioLearningStatus'] = message[21:] in ['True', '1']

            if 'roboActive' in message:
                self.state['roboActive'] = int(message[11:])

            if 'ambientSound' in message:
                self.state['ambientSound'] = int(message[13:])

            if 'decrement' in message:
                _, name = message.split()
                self.state['brains'][name] -= 1
                print('{} now has {} available slots'.format(name, self.state['brains'][name]))

            if 'learnwav' in message:
                _, filename = message.split()
                self.event.send_json({ 'learn': True, 'filename': filename })

            if 'respondwav_single' in message:
                _, filename = message.split()
                self.event.send_json({ 'respond_single': True, 'filename': filename })

            if 'respondwav_sentence' in message:
                _, filename = message.split()
                self.event.send_json({ 'respond_sentence': True, 'filename': filename })

            if 'play_sentence' in message:
                print('playSentence', message)
                sentence = message[13:]
                self.event.send_json({ 'play_sentence':True, 'sentence': sentence })

            if 'rhyme' in message:
                _, value = message.split()
                self.event.send_json({'rhyme': value == 'True'})

            if 'urge_to_say_something' in message:
                _, value = message.split()
                self.event.send_json({'urge_to_say_something': value})

            if 'autolearn' in message:
                self.state['autolearn'] = message[10:] in ['True', '1']

            if 'autorespond_single' in message:
                self.state['autorespond_single'] = message[19:] in ['True', '1']

            if 'autorespond_sentence' in message:
                self.state['autorespond_sentence'] = message[21:] in ['True', '1']

            if 'inputLevel' in message:
                self.event.send_json({ 'inputLevel': message[11:] })

            if 'calibrateEq' in message:
                self.event.send_json({ 'calibrateEq': True })

            if 'calibrateAudio' in message:
                latency_ok = False
                try:
                    lat = open('roundtrip_latency.txt', 'r')
                    latency = float(lat.readline())
                    self.event.send_json({ 'setLatency': latency })
                    latency_ok = True
                except Exception as e:
                    print('Something went wrong when reading latency from file.', e)
                    self.event.send_json({ 'calibrateAudio': True })
                if latency_ok:
                    self.event.send_json({ 'calibrateNoiseFloor': True }) 
                if 'calibrateAudio memoryRecording' in message:
                    self.state['memoryRecording'] = True

            if 'csinstr' in message:
                self.event.send_json({ 'csinstr': message[8:] })
             
            if 'selfDucking' in message:
                self.event.send_json({ 'selfDucking': message[12:] })

            if 'zerochannels' in message:
                self.event.send_json({ 'zerochannels': message[13:] })

            if 'playfile' in message:
                self.event.send_json({ 'playfile': message[9:] })

            if 'selfvoice' in message:
                self.event.send_json({ 'selfvoice': message[10:] })

            if 'save' in message:
                self.event.send_json({ 'save': utils.brain_name() if len(message) == 4 else message[5:] })

            if 'load' in message:
                if len(message) == 4:
                    brain_name = utils.find_last_valid_brain()
                else:
                    _, brain_name = message.split()
                if brain_name:
                    self.event.send_json({ 'load': brain_name })

            self.publisher.send_json(self.state)

        except Exception:
            utils.print_exception('Something went wrong when parsing the message - try again.')
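Since the controller pulls plain JSON-encoded command strings from the socket bound to IO.EXTERNAL, a client needs only a matching PUSH socket. A minimal hypothetical sender (IO comes from the project; the host is assumed local):

import zmq

context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.connect('tcp://localhost:{}'.format(IO.EXTERNAL))
sender.send_json('startrec')           # start recording
sender.send_json('autolearn True')     # toggle a state flag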
Example #28
File: altbrain.py Project: afcarl/self_dot
def new_learn_audio(host, debug=False):
    context = zmq.Context()

    mic = context.socket(zmq.SUB)
    mic.connect('tcp://{}:{}'.format(host, IO.MIC))
    mic.setsockopt(zmq.SUBSCRIBE, b'')

    dreamQ = context.socket(zmq.PUSH)
    dreamQ.connect('tcp://{}:{}'.format(host, IO.DREAM))


    stateQ, eventQ, brainQ = _three_amigos(context, host)

    sender = context.socket(zmq.PUSH)
    sender.connect('tcp://{}:{}'.format(host, IO.EXTERNAL))

    counterQ = context.socket(zmq.REQ)
    counterQ.connect('tcp://{}:{}'.format(host, IO.COUNTER))
    
    poller = zmq.Poller()
    poller.register(mic, zmq.POLLIN)
    poller.register(stateQ, zmq.POLLIN)
    poller.register(eventQ, zmq.POLLIN)

    audio = deque()
    NAPs = []
    wavs = []
    wav_audio_ids = {}
    NAP_hashes = {}

    audio_classifier = []
    audio_recognizer = []
    global_audio_recognizer = []
    mixture_audio_recognizer = []
    maxlen = []

    deleted_ids = []
    
    state = stateQ.recv_json()
    
    black_list = open('black_list.txt', 'a')

    audio_memory = AudioMemory()
    
    if debug:
        import matplotlib.pyplot as plt
        plt.ion()

    while True:
        events = dict(poller.poll())
        
        if stateQ in events:
            state = stateQ.recv_json()

        if mic in events:
            new_audio = utils.recv_array(mic)
            if state['record']:
                audio.append(new_audio)

        if eventQ in events:
            pushbutton = eventQ.recv_json()
            if 'learn' in pushbutton:
                try:
                    t0 = time.time()
                    filename = pushbutton['filename']
                    audio_segments = utils.get_segments(filename)

                    print('Learning {} duration {} seconds with {} segments'.format(filename, audio_segments[-1], len(audio_segments)-1))
                    new_sentence = utils.csv_to_array(filename + 'cochlear')
                    norm_segments = np.rint(new_sentence.shape[0]*audio_segments/audio_segments[-1]).astype('int')

                    audio_ids = []
                    new_audio_hash = []
                    amps = utils.get_amps(filename)
                    most_significant_value = -np.inf
                    most_significant_audio_id = []

                    original_NAP_length = len(NAPs)
                    
                    for segment, new_sound in enumerate([ utils.trim_right(new_sentence[norm_segments[i]:norm_segments[i+1]]) for i in range(len(norm_segments)-1) ]):
                        # We filter out short, abrupt sounds with lots of noise.
                        if np.mean(new_sound) < 2 or new_sound.shape[0] == 0:
                            black_list.write('{} {}\n'.format(filename, segment))
                            print('BLACKLISTED segment {} in file {}'.format(segment, filename))
                            continue

                        if debug:
                            utils.plot_NAP_and_energy(new_sound, plt)

                        audio_id = audio_memory.learn(new_sound, filename, [ audio_segments[segment], audio_segments[segment+1] ])

                        # START LEGACY
                        try:
                            wavs[audio_id].append(filename)
                        except:
                            wavs.append([filename])
                        wav_audio_ids[(filename, audio_id)] = [ audio_segments[segment], audio_segments[segment+1] ]
                        # END LEGACY
                        
                        audio_ids.append(audio_id)
                        if amps[segment] > most_significant_value:
                            most_significant_audio_id = audio_id
                            most_significant_value = amps[segment]

                    black_list.flush()
                    print('AUDIO IDs after blacklisting {}'.format(audio_ids))
                    if len(audio_ids):
                        # while len(NAPs) - len(deleted_ids) > AUDIO_MEMORY_SIZE:
                        #     utils.delete_loner(counterQ, NAPs, 'audio_ids_counter', int(AUDIO_MEMORY_SIZE*PROTECT_PERCENTAGE), deleted_ids)

                        # maxlen = max([ m.shape[0] for memory in NAPs for m in memory if len(m) ])
                        # memories = [ np.ndarray.flatten(utils.zero_pad(m, maxlen)) for memory in NAPs for m in memory if len(m) ]

                        # targets = [ i for i,f in enumerate(NAPs) for k in f if len(k) ]
                        # audio_classifier = train_rPCA_SVM(memories, targets)

                        # all_hammings = [ utils.hamming_distance(new_audio_hash[i], new_audio_hash[j])
                        #                                         for i in range(len(new_audio_hash)) for j in range(len(new_audio_hash)) if i > j ]
                    
                        # print 'RHYME VALUE', np.mean(sorted(all_hammings)[int(len(all_hammings)/2):])
                        # rhyme = np.mean(sorted(all_hammings)[int(len(all_hammings)/2):]) < RHYME_HAMMERTIME

                        # sender.send_json('rhyme {}'.format(rhyme))

                        brainQ.send_pyobj(['audio_learn', filename, audio_ids, audio_memory, most_significant_audio_id, wavs, wav_audio_ids])
                        print('Audio learned from {} in {} seconds'.format(filename, time.time() - t0))
                    else:
                        print('SKIPPING fully blacklisted file {}'.format(filename))
                except:
                    utils.print_exception('Audio learning aborted.')

                audio.clear()

            if 'dream' in pushbutton:
                new_dream(audio_memory)
                     
            if 'save' in pushbutton:
                utils.save('{}.{}'.format(pushbutton['save'], mp.current_process().name), [ deleted_ids, NAPs, wavs, wav_audio_ids, NAP_hashes, audio_classifier, maxlen, audio_memory ])
                
            if 'load' in pushbutton:
                deleted_ids, NAPs, wavs, wav_audio_ids, NAP_hashes, audio_classifier, maxlen, audio_memory = utils.load('{}.{}'.format(pushbutton['load'], mp.current_process().name))
Example #29
File: altbrain.py Project: afcarl/self_dot
def new_respond(control_host, learn_host, debug=False):
    context = zmq.Context()
    
    eventQ = context.socket(zmq.SUB)
    eventQ.connect('tcp://{}:{}'.format(control_host, IO.EVENT))
    eventQ.setsockopt(zmq.SUBSCRIBE, b'') 

    projector = context.socket(zmq.PUSH)
    projector.connect('tcp://{}:{}'.format(control_host, IO.PROJECTOR)) 

    sender = context.socket(zmq.PUSH)
    sender.connect('tcp://{}:{}'.format(control_host, IO.EXTERNAL))

    brainQ = context.socket(zmq.PULL)
    brainQ.bind('tcp://*:{}'.format(IO.BRAIN))

    counterQ = context.socket(zmq.REQ)
    counterQ.connect('tcp://{}:{}'.format(control_host, IO.COUNTER))
    
    cognitionQ = context.socket(zmq.PUSH)
    cognitionQ.connect('tcp://{}:{}'.format(control_host, IO.COGNITION))

    association = context.socket(zmq.REQ)
    association.connect('tcp://{}:{}'.format(learn_host, IO.ASSOCIATION))

    snapshot = context.socket(zmq.REQ)
    snapshot.connect('tcp://{}:{}'.format(control_host, IO.SNAPSHOT))

    scheduler = context.socket(zmq.PUSH)
    scheduler.connect('tcp://{}:{}'.format(control_host, IO.SCHEDULER))

    dreamQ = context.socket(zmq.PULL)
    dreamQ.bind('tcp://*:{}'.format(IO.DREAM))

    snapshot.send_json('Give me state!')
    state = snapshot.recv_json()

    poller = zmq.Poller()
    poller.register(eventQ, zmq.POLLIN)
    poller.register(brainQ, zmq.POLLIN)
    poller.register(dreamQ, zmq.POLLIN)

    sound_to_face = []
    wordFace = {}
    face_to_sound = []
    faceWord = {}
    register = {}
    video_producer = {}
    voiceType1 = 1
    voiceType2 = 6
    wordSpace1 = 0.3
    wordSpaceDev1 = 0.3
    wordSpace2 = 0.1
    wordSpaceDev2 = 0.3

    audio_ids = []
    wavs = []
    wav_audio_ids = []
    NAP_hashes = {}
    most_significant_audio_id = []
    
    if debug:
        import matplotlib.pyplot as plt
        plt.ion()
    
    while True:
        events = dict(poller.poll())

        if brainQ in events:
            cells = brainQ.recv_pyobj()

            mode = cells[0]
            wav_file = cells[1]

            if wav_file not in register:
                register[wav_file] = [False, False, False]

            if mode == 'audio_learn':
                register[wav_file][0] = cells
                            
            if mode == 'video_learn':
                register[wav_file][1] = cells

            if mode == 'face_learn':
                register[wav_file][2] = cells

            if all(register[wav_file]):
                _, _, audio_ids, audio_memory, most_significant_audio_id, wavs, wav_audio_ids = register[wav_file][0]
                _, _, tarantino = register[wav_file][1]
                _, _, face_id, face_recognizer = register[wav_file][2]          
                print 'Audio - video - face recognizers related to {} arrived at responder, total processing time {} seconds'.format(wav_file, time.time() - utils.filetime(wav_file))

                for audio_id in audio_ids: # If audio_ids is empty, none of this will happen
                    video_producer[(audio_id, face_id)] = tarantino 
                    if audio_id < len(sound_to_face) and not face_id in sound_to_face[audio_id]: # sound heard before, but not said by this face 
                        sound_to_face[audio_id].append(face_id)
                    if audio_id == len(sound_to_face):
                        sound_to_face.append([face_id])

                    wordFace.setdefault(audio_id, [[face_id,0]])
                    found = 0
                    for item in wordFace[audio_id]:
                        if item[0] == face_id:
                            item[1] += 1
                            found = 1
                    if found == 0:
                        wordFace[audio_id].append([face_id,1])

                    # We can't go from a not known face to any of the sounds, that's just the way it is.
                    print 'face_id for audio segment learned', face_id
                    if face_id != -1:  # compare by value; 'is' identity tests on ints are unreliable
                        if face_id < len(face_to_sound) and not audio_id in face_to_sound[face_id]: #face seen before, but the sound is new
                            face_to_sound[face_id].append(audio_id)
                        if face_id == len(face_to_sound):
                            face_to_sound.append([audio_id])
                        faceWord.setdefault(face_id, [[audio_id,0]])
                        found = 0
                        for item in faceWord[face_id]:
                            if item[0] == audio_id:
                                item[1] += 1
                                found = 1
                        if found == 0:
                            faceWord[face_id].append([audio_id,1])
                            
                del register[wav_file]
                
                similar_ids = []
                for audio_id in audio_ids:
                    # I SUSPECT THIS IS WRONG, SINCE THERE IS NO SORTING OF THESE HAMMING DISTANCES IN ASSOCIATION.PY
                    new_audio_hash = audio_memory.audio_ids[audio_id][-1].crude_hash
                    similar_ids_for_this_audio_id = [ utils.hamming_distance(new_audio_hash, random.choice(h).crude_hash) for h in audio_memory.audio_ids.itervalues() ]
                    similar_ids.append(similar_ids_for_this_audio_id)

                if len(audio_ids):
                    association.send_pyobj(['analyze',wav_file,wav_audio_ids,audio_ids,wavs,similar_ids,wordFace,faceWord])
                    association.recv_pyobj()
                    sender.send_json('last_most_significant_audio_id {}'.format(most_significant_audio_id))

                cognitionQ.send_pyobj(face_recognizer) # A possibility of recognizing a face that is not connected to any soundfiles

                                
        if eventQ in events:
            pushbutton = eventQ.recv_json()
            if 'respond_single' in pushbutton:
                try:
                    filename = pushbutton['filename']
                    audio_segments = utils.get_segments(filename)
                    print 'Single response to {} duration {} seconds with {} segments'.format(filename, audio_segments[-1], len(audio_segments)-1)
                    new_sentence = utils.csv_to_array(filename + 'cochlear')
                    norm_segments = np.rint(new_sentence.shape[0]*audio_segments/audio_segments[-1]).astype('int')

                    segment_id = utils.get_most_significant_word(filename)

                    NAP = utils.trim_right(new_sentence[norm_segments[segment_id]:norm_segments[segment_id+1]])
           
                    if debug:            
                        plt.imshow(NAP.T, aspect='auto')
                        plt.draw()

                    best_match,_,_,_,_ = audio_memory.find(NAP)
                    soundfile = best_match.wav_file
                    segstart, segend = best_match.segment_idxs

                    voiceChannel = 1
                    speed = 1
                    amp = -3 # voice amplitude in dB
                    _,dur,maxamp,_ = utils.getSoundInfo(soundfile)
                    
                    start = 0
                    voice1 = 'playfile {} {} {} {} {} {} {} {} {}'.format(1, voiceType1, start, soundfile, speed, segstart, segend, amp, maxamp)
                    voice2 = ''

                    print 'Recognized as sound {}'.format(best_match.audio_id)

                    # sound_to_face, video_producer
                    projection = _project(best_match.audio_id, sound_to_face, NAP, video_producer)

                    scheduler.send_pyobj([[ dur, voice1, voice2, projection, FRAME_SIZE ]])
                    print 'Respond time from creation of wav file was {} seconds'.format(time.time() - utils.filetime(filename))
                except:
                    utils.print_exception('Single response aborted.')


            if 'play_sentence' in pushbutton:
                try:
                    sentence = pushbutton['sentence']
                    sentence = eval(sentence)  # the sentence arrives as the string repr of a list of word ids
                    print '*** (play) Play sentence', sentence
                    start = 0 
                    nextTime1 = 0
                    play_events = []
                    for i in range(len(sentence)):
                        word_id = sentence[i]
                        soundfile = np.random.choice(wavs[word_id])
                        speed = 1

                        segstart, segend = wav_audio_ids[(soundfile, word_id)]
                        NAP = _extract_NAP(segstart, segend, soundfile)

                        amp = -3 # voice amplitude in dB
                        _,totaldur,maxamp,_ = utils.getSoundInfo(soundfile)
                        dur = segend-segstart
                        if dur <= 0: dur = totaldur
                        # play in both voices
                        voice1 = 'playfile {} {} {} {} {} {} {} {} {}'.format(1, voiceType1, start, soundfile, speed, segstart, segend, amp, maxamp)
                        voice2 = 'playfile {} {} {} {} {} {} {} {} {}'.format(2, voiceType1, start, soundfile, speed, segstart, segend, amp, maxamp)
                        wordSpacing1 = wordSpace1 + np.random.random()*wordSpaceDev1
                        print 'PLAY RESPOND SPACING', wordSpacing1
                        nextTime1 += (dur/speed)+wordSpacing1

                        projection = _project(word_id, sound_to_face, NAP, video_producer)  # use the word being played; audio_id is stale in this handler

                        play_events.append([ dur+wordSpacing1, voice1, voice2, projection, FRAME_SIZE ])                        
                    scheduler.send_pyobj(play_events)
                except:
                    utils.print_exception('Sentence play aborted.')

            if 'respond_sentence' in pushbutton:
                print 'SENTENCE Respond to', pushbutton['filename'][-12:]
                    
                try:
                    filename = pushbutton['filename']
                    audio_segments = utils.get_segments(filename)
                    print 'Sentence response to {} duration {} seconds with {} segments'.format(filename, audio_segments[-1], len(audio_segments)-1)
                    new_sentence = utils.csv_to_array(filename + 'cochlear')
                    norm_segments = np.rint(new_sentence.shape[0]*audio_segments/audio_segments[-1]).astype('int')

                    segment_id = utils.get_most_significant_word(filename)
                    print '**Sentence selected to respond to segment {}'.format(segment_id)

                    NAP = utils.trim_right(new_sentence[norm_segments[segment_id]:norm_segments[segment_id+1]])

                    best_match,_,_,_,_ = audio_memory.find(NAP)
                    audio_id = best_match.audio_id
                    soundfile = best_match.wav_file
        
                    numWords = len(audio_segments)-1
                    print numWords
                    association.send_pyobj(['setParam', 'numWords', numWords ])
                    association.recv_pyobj()
                    
                    association.send_pyobj(['makeSentence', audio_id])
                    print 'respond_sentence waiting for association output...', 
                    sentence, secondaryStream = association.recv_pyobj()

                    print '*** (respond) Play sentence', sentence, secondaryStream
                    start = 0 
                    nextTime1 = 0
                    nextTime2 = 0
                    enableVoice2 = 1

                    play_events = []

                    for i in range(len(sentence)):
                        word_id = sentence[i]
                        soundfile = np.random.choice(wavs[word_id])
                        voiceChannel = 1
                        speed = 1
                        
                        # segment start and end within sound file, if zero, play whole file
                        segstart, segend = wav_audio_ids[(soundfile, word_id)]
                        NAP = _extract_NAP(segstart, segend, soundfile)
                        
                        amp = -3 # voice amplitude in dB
                        #totaldur, maxamp = utils.getSoundParmFromFile(soundfile)
                        _,totaldur,maxamp,_ = utils.getSoundInfo(soundfile)
                        dur = segend-segstart
                        if dur <= 0: dur = totaldur
                        voice1 = 'playfile {} {} {} {} {} {} {} {} {}'.format(voiceChannel, voiceType1, start, soundfile, speed, segstart, segend, amp, maxamp)
                        #start += dur # if we want to create a 'score section' for Csound, update start time to make segments into a contiguous sentence
                        wordSpacing1 = wordSpace1 + np.random.random()*wordSpaceDev1
                        nextTime1 += (dur/speed)+wordSpacing1
                        #print 'voice 2 ready to play', secondaryStream[i], i
                        if enableVoice2:
                            word_id2 = secondaryStream[i]
                            #print 'voice 2 playing', secondaryStream[i]
                            soundfile2 = np.random.choice(wavs[word_id2])
                            voiceChannel2 = 2
                            start2 = 0.7 #  set delay between voice 1 and 2
                            speed2 = 0.7
                            amp2 = -10 # voice amplitude in dB
                            try:
                                segstart2, segend2 = wav_audio_ids[(soundfile2, word_id2)]
                                dur2 = segend2-segstart2
                                #totalDur2, maxamp2 = utils.getSoundParmFromFile(soundfile2)
                                _,totalDur2,maxamp2,_ = utils.getSoundInfo(soundfile2)
                                if dur2 <= 0: dur2 = totalDur2
                                voice2 = 'playfile {} {} {} {} {} {} {} {} {}'.format(voiceChannel2, voiceType2, start2, soundfile2, speed2, segstart2, segend2, amp2, maxamp2)
                                wordSpacing2 = wordSpace2 + np.random.random()*wordSpaceDev2
                                nextTime2 += (dur2/speed2)+wordSpacing2
                            except:
                                voice2 = ''
                                utils.print_exception('VOICE 2 tried to access an illegal soundfile/audio_id combination.')
                            #enableVoice2 = 0
                        # trig another word in voice 2 only if word 2 has finished playing (and sync to start of voice 1)
                        if nextTime1 > nextTime2: enableVoice2 = 1 

                        projection = _project(audio_id, sound_to_face, NAP, video_producer)
                        print 'SENTENCE RESPOND SPACING', wordSpacing1
                        play_events.append([ dur+wordSpacing1, voice1, voice2, projection, FRAME_SIZE ])

                    scheduler.send_pyobj(play_events)
                    print 'Sentence respond time from creation of wav file was {} seconds'.format(time.time() - utils.filetime(filename))
                except:
                    utils.print_exception('Sentence response aborted.')
                    
            if 'testSentence' in pushbutton:
                print 'testSentence', pushbutton
                association.send_pyobj(['makeSentence',int(pushbutton['testSentence'])])
                print 'testSentence waiting for association output...'
                sentence, secondaryStream = association.recv_pyobj()
                print '*** Test sentence', sentence, secondaryStream
            
            if 'assoc_setParam' in pushbutton:
                try:
                    parm, value = pushbutton['assoc_setParam'].split()
                    association.send_pyobj(['setParam', parm, value ])
                    association.recv_pyobj()
                except:
                    utils.print_exception('Assoc set param aborted.')

            if 'respond_setParam' in pushbutton:
                items = pushbutton['respond_setParam'].split()
                if items[0] == 'voiceType':
                    chan = items[1]
                    if chan == '1': voiceType1 = int(items[2])
                    if chan == '2': voiceType2 = int(items[2])
                if items[0] == 'wordSpace':
                    chan = items[1]
                    print 'wordSpace chan', chan, items
                    if chan == '1': wordSpace1 = float(items[2])
                    if chan == '2': wordSpace2 = float(items[2])
                if items[0] == 'wordSpaceDev':
                    chan = items[1]
                    print 'wordSpaceDev chan', chan, items
                    if chan == '1': wordSpaceDev1 = float(items[2])
                    if chan == '2': wordSpaceDev2 = float(items[2])

            if 'play_id' in pushbutton:
                try:
                    items = pushbutton['play_id'].split(' ')
                    if len(items) < 3: print 'PARAMETER ERROR: play_id audio_id voiceChannel voiceType'
                    play_audio_id = int(items[0])
                    voiceChannel = int(items[1])
                    voiceType = int(items[2])
                    print 'play_audio_id', play_audio_id, 'voice', voiceChannel
                    print 'wavs[play_audio_id]', wavs[play_audio_id]
                    #print wavs
                    soundfile = np.random.choice(wavs[play_audio_id])
                    
                    speed = 1
                    #print 'wav_audio_ids', wav_audio_ids
                    segstart, segend = wav_audio_ids[(soundfile, play_audio_id)]
                    #segstart = 0 # segment start and end within sound file
                    #segend = 0 # if zero, play whole file
                    amp = -3 # voice amplitude in dB
                    #dur, maxamp = utils.getSoundParmFromFile(soundfile)
                    _,dur,maxamp,_ = utils.getSoundInfo(soundfile)
                    start = 0
                    sender.send_json('playfile {} {} {} {} {} {} {} {} {}'.format(voiceChannel, voiceType, start, soundfile, speed, segstart, segend, amp, maxamp))
                except:
                    utils.print_exception('play_id aborted.')

            if 'print_me' in pushbutton:
                # just for inspecting the contents of objects while running 
                print 'printing '+pushbutton['print_me']
                if 'brain ' in pushbutton['print_me']: 
                    print_variable = pushbutton['print_me'].split('brain ')[-1]
                    try:
                        print eval(print_variable)
                    except Exception, e:
                        print e, 'print_me in brain failed.'
                elif 'association ' in pushbutton['print_me']: 
                    print_variable = pushbutton['print_me'].split('association ')[-1]
                    association.send_pyobj(['print_me',print_variable])

            if 'dream' in pushbutton:
                play_events = []
                for audio_segment in audio_memory.all_segments():
                    segstart, segend = audio_segment.segment_idxs
                    dur = segend - segstart
                    NAP = _extract_NAP(segstart, segend, audio_segment.wav_file)
                    speed = 1
                    amp = -3
                    maxamp = 1
                    start = 0
                    voice1 = 'playfile {} {} {} {} {} {} {} {} {}'.format(1, 6, np.random.rand()/3, audio_segment.wav_file, speed, segstart, segend, amp, maxamp)
                    projection = _project(audio_segment.audio_id, sound_to_face, NAP, video_producer)
                    voice2 = 'playfile {} {} {} {} {} {} {} {} {}'.format(2, 6, np.random.randint(3,6), audio_segment.wav_file, speed, segstart, segend, amp, maxamp)
                    play_events.append([ dur, voice1, voice2, projection, FRAME_SIZE ])
                print 'Dream mode playing back {} memories'.format(len(play_events))
                scheduler.send_pyobj(play_events)

            if 'save' in pushbutton:
                utils.save('{}.{}'.format(pushbutton['save'], mp.current_process().name), [ sound_to_face, wordFace, face_to_sound, faceWord, video_producer, wavs, wav_audio_ids, audio_classifier, maxlen, NAP_hashes, face_id, face_recognizer, audio_memory ])

            if 'load' in pushbutton:
                sound_to_face, wordFace, face_to_sound, faceWord, video_producer, wavs, wav_audio_ids, audio_classifier, maxlen, NAP_hashes, face_id, face_recognizer, audio_memory = utils.load('{}.{}'.format(pushbutton['load'], mp.current_process().name))
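`new_respond` wires up a fair number of ZeroMQ sockets: SUB for controller events, PULL for results from the learning processes, PUSH for outgoing commands, and REQ for request/reply services such as the associator. A stripped-down sketch of the same poll-and-dispatch loop, assuming only pyzmq and hypothetical port numbers:

import zmq

def poll_loop(control_host, event_port=5555, brain_port=5556):
    context = zmq.Context()

    # Subscribe to every event the controller broadcasts.
    eventQ = context.socket(zmq.SUB)
    eventQ.connect('tcp://{}:{}'.format(control_host, event_port))
    eventQ.setsockopt(zmq.SUBSCRIBE, b'')

    # Collect work items pushed by the learner processes.
    brainQ = context.socket(zmq.PULL)
    brainQ.bind('tcp://*:{}'.format(brain_port))

    poller = zmq.Poller()
    poller.register(eventQ, zmq.POLLIN)
    poller.register(brainQ, zmq.POLLIN)

    while True:
        # Block until at least one socket is readable, then dispatch.
        events = dict(poller.poll())
        if brainQ in events:
            print('brain message: {}'.format(brainQ.recv_pyobj()))
        if eventQ in events:
            print('event: {}'.format(eventQ.recv_json()))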
Example #30
0
File: altbrain.py Project: afcarl/self_dot
def new_dream(audio_memory):
    
    #import matplotlib.pyplot as plt
    #plt.ion()

    try:
        print 'Dreaming - removing wrongly binned filenames'
        mega_filenames_and_indexes = []

        for audio_id, audio_segments in audio_memory.audio_ids.iteritems():

            NAP_detail = 'low'
            filenames_and_indexes = []

            for audio_segment in audio_segments:
                segstart, segend = audio_segment.segment_idxs
                audio_times = utils.get_segments(audio_segment.wav_file)
                norm_segstart = segstart/audio_times[-1]
                norm_segend = segend/audio_times[-1]
                filenames_and_indexes.append([ audio_segment.wav_file, norm_segstart, norm_segend, audio_id, NAP_detail ])
                
            mega_filenames_and_indexes.extend(filenames_and_indexes)

            k = 2
            print 'Examining audio_id {}'.format(audio_id)
            if len(audio_segments) == 1:
                print 'Just one member in this audio_id, skipping analysis'
                continue

            sparse_codes = mysai.experiment(filenames_and_indexes, k)
            # plt.matshow(sparse_codes, aspect='auto')
            # plt.colorbar()
            # plt.draw()

            coarse = np.mean(sparse_codes, axis=1)
            coarse.shape = (len(coarse), 1)

            codebook,_ = kmeans(coarse, k)
            instances = [ vq(np.atleast_2d(s), codebook)[0] for s in coarse ]

            freqs = itemfreq(instances)
            sorted_freqs = sorted(freqs, key=lambda x: x[1])
            print 'Average sparse codes: {} Class count: {}'.format(list(itertools.chain.from_iterable(coarse)), sorted_freqs)

            if len(sorted_freqs) == 1:
                print 'Considered to be all the same.'
                continue

            fewest_class = sorted_freqs[0][0]
            ousted_audio_segments = [ audio_segment for audio_segment, i in zip(audio_segments, instances) if i == fewest_class ]
            print 'Class {} has fewest members, deleting audio_segments {}'.format(fewest_class, ousted_audio_segments)
            for ousted_segment in ousted_audio_segments:
                audio_memory.forget(ousted_segment)

        print 'Creating mega super self-organized class'

        for row in mega_filenames_and_indexes:
            row[-1] = 'high'

        high_resolution_k = 256
        clusters = 24
        sparse_codes = mysai.experiment(mega_filenames_and_indexes, high_resolution_k)
        sparse_codes = np.array(sparse_codes)
        # plt.matshow(sparse_codes, aspect='auto')
        # plt.colorbar()
        # plt.draw()

        codebook,_ = kmeans(sparse_codes, clusters)
        instances = [ vq(np.atleast_2d(s), codebook)[0] for s in sparse_codes ]

        cluster_list = {}
        for mega, instance in zip(mega_filenames_and_indexes, instances):
            soundfile,_,_,audio_id,_ = mega
            cluster_list[(soundfile, audio_id)] = instance

        print cluster_list
    except:
        utils.print_exception('NIGHTMARE!')
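The pruning pass in `new_dream` is a small self-organizing trick: each segment's sparse code is averaged down to one scalar, the scalars are clustered with k-means (k=2), and the members of the smaller cluster are forgotten as probable mis-binnings. A self-contained sketch of that idea on toy data, assuming only NumPy and SciPy:

import numpy as np
from scipy.cluster.vq import kmeans, vq

# Toy per-segment scores: most sit near 1.0, two outliers near 5.0.
coarse = np.array([[1.0], [1.1], [0.9], [5.2], [1.05], [4.8]])

codebook, _ = kmeans(coarse, 2)      # two cluster centroids
instances, _ = vq(coarse, codebook)  # centroid index per segment

# Count members per cluster and single out the smallest class.
classes, counts = np.unique(instances, return_counts=True)
fewest_class = classes[np.argmin(counts)]

ousted = [i for i, c in enumerate(instances) if c == fewest_class]
print('segments considered wrongly binned: {}'.format(ousted))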
Example #31
0
def brand_rank_prediction(file, model_dict):
    file_name = os.path.basename(file)

    try:
        logger.info("{file_name} predict".format(file_name=file_name))
        predict_begin_time = time.time()

        tp = pd.read_csv(file, sep='\t', encoding='gb18030',
                         names=['brandName', 'category', 'cityCount', 'averageGrade', 'midgrade', 'averageCommentCount',
                                'midCommentCount', 'averageprice', 'midPrice', 'categoryTotalCount', 'fiveRank',
                                'tenRank', 'twentyRank', 'fortyRank', 'fivePercentRank', 'tenPercentRank',
                                'twentyPercentRank', 'fortyPercentRank', 'hundredPercentRank'],
                         quoting=csv.QUOTE_NONE)

        parent_category = tp['category'][0]

        if parent_category not in model_dict:
            brand_rank_result = tp.loc[:, ['brandName', 'hundredPercentRank', 'categoryTotalCount']]
        else:
            tp = feature_covert(tp)
            if file_name == 'binguanfandian-feature':
                features = tp.loc[:, ['averageCommentCount_log', 'averageprice_log']]
            elif file_name == 'jinrongyinxing-feature':
                features = tp.loc[:,
                           ['cityCount_log', 'averageGrade_log', 'averageCommentCount_log', 'averageprice_log',
                            'categoryTotalCount_log', 'is_atm_sign']]
            else:
                features = tp.loc[:,
                           ['cityCount_log', 'averageGrade_log', 'averageCommentCount_log', 'averageprice_log',
                            'categoryTotalCount_log']]

            model = model_dict.get(parent_category)

            y_train_pred = model.predict(features)

            tp['y_train_pred'] = y_train_pred
            tp['pre_rank_level'] = tp['y_train_pred'].apply(func=convert_level)
            brand_rank_result = tp.loc[:, ['brandName', 'pre_rank_level', 'categoryTotalCount']]

        path = local_featurePoiRank_path + file_name + "-brandRank"
        brand_rank_result.to_csv(path, encoding='gb18030', sep='\t', index=False, header=False, quoting=csv.QUOTE_NONE)

        logger.info("{file_name} rank predict finished,used time:{use_time}".format(
            file_name=file_name, use_time=time.time() - predict_begin_time))


    except Exception as e:
        logger.info('{file_name} exception,info:{info}'.format(file_name=file_name, info=utils.print_exception()))
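The `_log` suffixes suggest that `feature_covert` adds log-transformed copies of the heavy-tailed count and price columns before prediction, a common stabilizing step for review counts and prices. A hedged sketch of such a transform, assuming `log1p` is the intended mapping and that the listed raw columns exist (the real `feature_covert`, and extras such as `is_atm_sign`, may be computed differently):

import numpy as np

def feature_covert(tp):
    # Assumed implementation: derive the *_log features referenced above.
    for col in ['cityCount', 'averageGrade', 'averageCommentCount',
                'averageprice', 'categoryTotalCount']:
        tp[col + '_log'] = np.log1p(tp[col])
    return tp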
Example #32
0
    def _show_error(self, exception, message):
        """Show error.
        'exception' is the exception information.
        'message' is the corresponding message."""
        print_exception(exception, message)
        self._show_connection_error(exception)
Example #33
0
def rank_prediction(file, model_dict):
    file_name = os.path.basename(file)

    try:
        # global local_featurePoiRank_path
        logger.info("{file_name} predict".format(file_name=file_name))
        tp = pd.read_csv(file, sep='\t', encoding='gb18030',
                                 names=['name', 'dataId', 'city', 'parentCategory', 'subCategory', 'brand',
                                        'categoryScore', 'tagScore', 'matchCountScore', 'gradeScore',
                                        'commentScore', 'priceScore', 'areaScore', 'leafCountScore', 'doorCountScore',
                                        'parkCountScore', 'innerCountScore', 'buildCountScore',
                                        'point', 'keyword', 'matchCount', 'grade', 'comment', 'price', 'area',
                                        'leafCount', 'doorCount', 'parkCount', 'innerCount', 'buildCount',
                                        'hotCount', 'hitcount', 'viewcount', 'citySize'],
                                 quoting=csv.QUOTE_NONE, iterator=True, chunksize=1000)

        featurePoi = pd.concat(tp, ignore_index=True)


        parentCategory = featurePoi['parentCategory'][0]

        if parentCategory not in model_dict:
            return

        model = model_dict.get(parentCategory)
        begin_time = time.time()
        W = weight.loc[weight.parentCategory == parentCategory, 'subCategoryScore':]
        X = featurePoi.iloc[:, 6:18]

        featureValues = np.multiply(X, W)

        featurePoi.loc[:, 'sumWeight1'] = featureValues.sum(axis=1)

        y_train_pred = model.predict(featurePoi.loc[:, ['sumWeight1', 'citySize']])

        featurePoi['y_train_pred'] = y_train_pred
        featurePoi['pre_rank_level'] = featurePoi['y_train_pred'].apply(func=convert_level)

        path = local_featurePoiRank_path + file_name + "-rank"
        featurePoi.to_csv(path, encoding='gb18030', sep='\t', index=False, header=False, quoting=csv.QUOTE_NONE)

        logger.info("{file_name} rank predict finished,used time:{use_time}".format(
            file_name=file_name, use_time=time.time() - begin_time))


    except Exception as e:
        logger.info('{file_name} exception,info:{info}'.format(file_name=file_name,info=utils.print_exception()))
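One subtlety in `rank_prediction`: `np.multiply(X, W)` on two pandas objects aligns them by label, so the single weight row in `W` only combines cleanly with `X` when the column labels line up. A small sketch of the same weighted-sum step with explicit array broadcasting, using hypothetical feature names and weights:

import numpy as np
import pandas as pd

# Hypothetical: three POIs with two feature scores each.
X = pd.DataFrame({'gradeScore': [0.5, 0.9, 0.1],
                  'commentScore': [0.2, 0.4, 0.8]})
W = np.array([0.7, 0.3])  # category weights as a plain array

# Broadcasting multiplies every row of X by W, then sums per row.
sum_weight = (X.values * W).sum(axis=1)
print(sum_weight)  # one weighted score per POI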
Example #34
0
def write_test_file(name, all_tests):

    for test in all_tests:
        unicode = requires_unicode(str(test))
        if not unicode and not isinstance(test.expected(), bool):
            unicode = requires_unicode(test.expected().render())
        if unicode:
            test.add_condition(r'UNICODE_LITERALS_OK')

    tests_by_group = {}
    for test in all_tests:
        if test.group() not in tests_by_group:
            tests_by_group[test.group()] = {}
        cond = test.condition()
        if cond not in tests_by_group[test.group()]:
            tests_by_group[test.group()][cond] = []
        tests_by_group[test.group()][cond].append(test)
    all_tests = tests_by_group

    test_file_path = Path(
        utils.entry_script_dir(), '..', 'tests',
        rf'conformance_{sanitize(name.strip())}.cpp').resolve()
    with StringIO() as test_file_buffer:
        write = lambda txt, end='\n': print(
            txt, file=test_file_buffer, end=end)

        # preamble
        write(
            r'// This file is a part of toml++ and is subject to the terms of the MIT license.'
        )
        write(r'// Copyright (c) Mark Gillard <*****@*****.**>')
        write(
            r'// See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.'
        )
        write(r'// SPDX-License-Identifier: MIT')
        write(r'//-----')
        write(
            r'// this file was generated by generate_conformance_tests.py - do not modify it directly'
        )
        write(r'')
        write(r'#include "tests.h"')

        # test data
        write(r'')
        write('namespace')
        write('{', end='')
        for group, conditions in all_tests.items():
            for condition, tests in conditions.items():
                write('')
                if condition != '':
                    write(f'#if {condition}')
                    write('')
                for test in tests:
                    write(f'\t{test}')
                if condition != '':
                    write('')
                    write(f'#endif // {condition}')
        write('}')

        # tests
        write('')
        write(f'TEST_CASE("conformance - {name}")')
        write('{', end='')
        for group, conditions in all_tests.items():
            for condition, tests in conditions.items():
                if condition != '':
                    write('')
                    write(f'#if {condition}')
                for test in tests:
                    write('')
                    expected = test.expected()
                    if isinstance(expected, bool):
                        if expected:
                            write(
                                f'\tparsing_should_succeed(FILE_LINE_ARGS, {test.identifier()}); // {test.name()}'
                            )
                        else:
                            write(
                                f'\tparsing_should_fail(FILE_LINE_ARGS, {test.identifier()}); // {test.name()}'
                            )
                    else:
                        s = expected.render('\t\t')
                        write(
                            f'\tparsing_should_succeed(FILE_LINE_ARGS, {test.identifier()}, [](toml::table&& tbl) // {test.name()}'
                        )
                        write('\t{')
                        write(f'\t\tconst auto expected = {s};')
                        write('\t\tREQUIRE(tbl == expected);')
                        write('\t});')
                if condition != '':
                    write('')
                    write(f'#endif // {condition}')
        write('}')
        write('')

        test_file_content = test_file_buffer.getvalue()

        # clang-format
        print(f"Running clang-format for {test_file_path}")
        try:
            test_file_content = utils.apply_clang_format(
                test_file_content, cwd=test_file_path.parent)
        except Exception as ex:
            print(rf'Error running clang-format:', file=sys.stderr)
            utils.print_exception(ex)

        # write to disk
        print(rf'Writing {test_file_path}')
        with open(test_file_path, 'w', encoding='utf-8',
                  newline='\n') as test_file:
            test_file.write(test_file_content)
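The nested `tests_by_group[group][condition]` bookkeeping at the top of `write_test_file` can be expressed more compactly with `collections.defaultdict`; a small equivalent sketch, assuming tests expose `group()` and `condition()` as in the original:

from collections import defaultdict

def group_tests(all_tests):
    # Map group -> condition -> list of tests, creating levels on demand.
    tests_by_group = defaultdict(lambda: defaultdict(list))
    for test in all_tests:
        tests_by_group[test.group()][test.condition()].append(test)
    return tests_by_group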