Example #1
def classify_temp_header(string):
    import functions
    if functions.is_number(string):
        number = float(string)
        return (1, number)

    elif string[0:1] == 'T' and string[-3:] == '(C)' and functions.is_number(
            string[1:-3]):
        number = int(string[1:-3])
        return (2, number)

    elif string[0:1] == 'T' and functions.is_number(string[1:]):
        number = int(string[1:])
        return (2, number)

    elif len(string) >= 4:
        if string[0:4] == 'TEMP' and functions.is_number(string[4:]):
            number = int(string[4:])
            return (2, number)
        elif string[0:5] == 'Temp ' and functions.is_number(string[5:]):
            number = int(string[5:])
            return (2, number)
        else:
            return (0, 0)
    else:
        return (0, 0)
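Note: every example on this page calls functions.is_number, but the helper itself is not reproduced here. A minimal sketch of the behaviour the snippets appear to assume (true when the string parses as a float) could look like this; the real implementation may differ.

def is_number(string):
    # Hypothetical stand-in for functions.is_number: accept anything float() accepts.
    try:
        float(string)
        return True
    except (TypeError, ValueError):
        return False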
Example #2
def seq1(update, context):
    if not functions.is_number(update.message.text):
        update.message.reply_text("Пожалуйста, введите первый член в правильном формате: число")
        return 1
    for_seq["a1/b1"] = float(update.message.text)
    if for_seq["f"]:
        update.message.reply_text("А теперь введите разность вашей арифметической прогрессии(число)")
    else:
        update.message.reply_text("А теперь введите знаменатель вашей геометрической прогрессии(число)")
    return 2
Example #3
def cnt_sis2(update, context):
    if not functions.is_number(update.message.text):
        update.message.reply_text("Введите, пожалйста, систему счисления в правильном формате: целое число от 2 до 10")
        return 2
    if int(update.message.text) != float(update.message.text):
        update.message.reply_text("Введите, пожалйста, систему счисления в правильном формате: целое число от 2 до 10")
        return 2
    if not (1 < int(update.message.text) < 11):
        update.message.reply_text("Введите, пожалйста, систему счисления в правильном формате: целое число от 2 до 10")
        return 2
    for_cnt_sis["sis1"] = int(update.message.text)
    update.message.reply_text("А теперь введите в какую систему счисления вы хотите его перевести")
    return 3
Example #4
def cnt_sis1(update, context):
    if not functions.is_number(update.message.text):
        update.message.reply_text("Введите, пожалйста, в правильном формате: любое целое положительное число")
        return 1
    if int(update.message.text) != float(update.message.text):
        update.message.reply_text("Введите, пожалйста, в правильном формате: любое целое положительное число")
        return 1
    if int(update.message.text) < 0:
        update.message.reply_text("Введите, пожалйста, в правильном формате: любое целое положительное число")
        return 1
    for_cnt_sis["num"] = int(update.message.text)
    update.message.reply_text("Теперь введите в какой системе считсления оно находится(целое число от 2 до 10)")
    return 2
Example #5
def cnt_sis3(update, context):
    if not functions.is_number(update.message.text):
        update.message.reply_text("Введите, пожалйста систему счисления в правильном формате: целое число от 2 до 10")
        return 3
    if int(update.message.text) != float(update.message.text):
        update.message.reply_text("Введите, пожалйста систему счисления в правильном формате: целое число от 2 до 10")
        return 3
    if not (1 < int(update.message.text) < 11):
        update.message.reply_text("Введите, пожалйста систему счисления в правильном формате: целое число от 2 до 10")
        return 3
    for_cnt_sis["sis2"] = int(update.message.text)
    update.message.reply_text(str(for_cnt_sis["num"]) + " in base " + str(for_cnt_sis["sis1"]) +
                              " equals " + functions.perevod(for_cnt_sis) + " in base " +
                              str(for_cnt_sis["sis2"]))
    return ConversationHandler.END
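The final reply above depends on functions.perevod, which is not shown on this page. Judging from the conversation (a number, its source base sis1, a target base sis2, both between 2 and 10), a plausible sketch is:

def perevod(d):
    # Hypothetical sketch: reinterpret the digits of d["num"] in base d["sis1"],
    # then write the value out in base d["sis2"]. Returns a string.
    value = int(str(d["num"]), d["sis1"])
    digits = []
    while value:
        digits.append(str(value % d["sis2"]))
        value //= d["sis2"]
    return ''.join(reversed(digits)) or '0'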
Example #6
 def convertCell(self, cell, is_math = False):
     """ convert cell string to latex format
         first tokenize string by spaces
         if is_math = true, add $ $ around numbers
         
         TESTED
     """
     new_cell = cell
     if (is_math):
         # separate the cell into a list using space
         new_cell_list = new_cell.split(' ')
         # check each element; if number, add $$ around
         for index in range(len(new_cell_list)):
             if (functions.is_number(new_cell_list[index])):
                 new_cell_list[index] = '$' + new_cell_list[index] + '$'
         new_cell = ' '.join(new_cell_list)
     return new_cell
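For illustration, the same tokenize-and-wrap step can be exercised on its own (reusing the is_number sketch from the note after Example #1):

cell = "width 3.5 mm"
wrapped = ['$' + token + '$' if is_number(token) else token for token in cell.split(' ')]
print(' '.join(wrapped))   # -> width $3.5$ mm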
Example #7
def seq2(update, context):
    if not functions.is_number(update.message.text):
        if for_seq["f"]:
            update.message.reply_text("Пожалуйста, введите разность вашей арифметической прогрессии "
                                      "в правильном формате: число")
        else:
            update.message.reply_text("Пожалуйста, введите знаменатель вашей геометрической прогрессии "
                                      "в правильном формате: число")
        return 2
    for_seq["d/q"] = float(update.message.text)
    if for_seq["f"]:
        update.message.reply_text("Ну и осталось ввести количесво членов в вашей фриметической прогрессии. "
                                  "Введите его(это должно быть целое неотрицательное число)")
    else:
        update.message.reply_text("Ну и осталось ввести количество членов в вашей геометрической прогрессии."
                                  "Введите его(это должно быть целое неотрицательное число)")
    return 3
Example #8
def seq3(update, context):
    if not functions.is_number(update.message.text):
        update.message.reply_text("Пожалуйста, введите количество членов в правильном формате: "
                                  "целое неотрицательное ЧИСЛО")
    if int(update.message.text) != float(update.message.text):
        update.message.reply_text("Пожалуйста, введите количество членов в правильном формате: "
                                  "ЦЕЛОЕ неотрицательное число")
    if int(update.message.text) < 0:
        update.message.reply_text("Пожалуйста, введите количество членов в правильном формате: "
                                  "целое НЕОТРИЦАТЕЛЬНОЕ число")
    for_seq["n"] = int(update.message.text)
    if for_seq["f"]:
        update.message.reply_text("Сумма первых " + str(for_seq["n"]) + " членов вашей арифметической прогресии равна "
                                  + str(functions.sequences(for_seq)))
    else:
        update.message.reply_text("Сумма первых " + str(for_seq["n"]) + " членов вашей геометрической прогресии равна "
                                  + str(functions.sequences(for_seq)))
    return ConversationHandler.END
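functions.sequences is also not shown here. Given how for_seq is filled in (first term "a1/b1", difference or ratio "d/q", term count "n", flag "f" selecting arithmetic), a plausible sketch is:

def sequences(d):
    # Hypothetical sketch: sum of the first n terms of an arithmetic ("f" truthy)
    # or geometric ("f" falsy) progression.
    a1, r, n = d["a1/b1"], d["d/q"], d["n"]
    if d["f"]:
        return n * (2 * a1 + (n - 1) * r) / 2
    if r == 1:
        return a1 * n
    return a1 * (r ** n - 1) / (r - 1)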
Example #9
def process_dates(date_string, buoy_name, file_ext_name):
    import functions

    date_functions_d = {1: datefunc1, 3: datefunc3, 5: datefunc5}

    import re
    date_cpts = re.split('[:/ ]', date_string)
    bad_cpts = [cpt for cpt in date_cpts if not functions.is_number(cpt)]

    if len(bad_cpts) > 0:
        return None
    else:

        ncpts = len(date_cpts)

        use_function = date_functions_d[ncpts]

        processed_date = use_function(date_cpts, buoy_name, file_ext_name)
        return processed_date
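The date_functions_d lookup dispatches on how many components the split yields; the datefuncN helpers themselves are not shown. As an illustration:

import re
# A timestamp such as "2012/08/06 14:30" splits on ':', '/' and ' ' into five
# numeric parts, so process_dates would hand it to datefunc5.
print(re.split('[:/ ]', "2012/08/06 14:30"))   # ['2012', '08', '06', '14', '30']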
Example #10
def task():
    args = request.get_json(silent=True)
    print(args)
    if args is None:
        args = request.form.to_dict()
        if len(args) == 0:
            args = request.args.to_dict()
            if len(args) == 0:
                return jsonify({"error": "No arguments in request"})
    if len(args) > 1:
        return jsonify({"error": "Got more then 1 argument"})
    number = list(args.values())[0]

    if functions.is_number(number) == False:
        return jsonify(
            {"error": "The argument does not match the task conditions"})
    else:
        number = int(number)

    try:
        db.Numbers.insert_many([{
            "Number": number
        }, {
            "Number": number - 1
        }],
                               ordered=True)
    except pymongo.errors.DuplicateKeyError as e:
        print("\ncreate index error: " + str(e.details))
    except pymongo.errors.BulkWriteError as e:
        print("\nBULKerror: " + str(e.details))
        if e.details["writeErrors"][0]["keyValue"]["Number"] != number - 1:
            log = strftime("%d.%m.%Y %H:%M:%S", gmtime()) + " " + str(
                number) + " Number has already been received\n"
            print(log)
            return jsonify({
                "error":
                "Number '" + str(number) + "' has already been received"
            })

    return jsonify({"response": str(number + 1)})
Example #11
    def read(self,full_file,key):
        '''Reads IMB temperature data from a given file into the given
           temperature series'''
        import csv
        import data_series as ds
        import functions
        
        max_zlist = len(key.value_index)                
            
        fileh = open(full_file)
        rows = csv.reader(fileh)
        for row in rows:
            if len(row)>0:
                date_string = row[key.date_index]
                date = ds.process_dates(date_string,self.name,self.fen)
                
                if date is not None:
                
                    temp_list = []
                    for index in key.value_index:
                        if index >= len(row):
                            temp_value = self.mdi
                        else:
                            temp_string = row[index]
                            if len(temp_string) != 0 and \
                                 functions.is_number(temp_string):
                                temp_value = float(temp_string)
                            else:
                                temp_value = self.mdi
                        
                        temp_list.append(temp_value)
                        
                    if len(temp_list) != max_zlist:
                        print('Number of temp instances does not match key for date ', date)
                        return 0
                        
                    if temp_list.count(self.mdi) != len(temp_list):
                        self.profile_set[date] = temp_list

        fileh.close()
Example #12
def buoylist(year=0):
    '''Returns list of all IMB labels'''
    import subprocess
    import functions
    import filepaths

    command = ['ls', filepaths.filepaths()['source_dir']]
    output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]
    str_output = output.decode('utf-8')
    str_output = str_output.split('\n')

    if year != 0:
        restrict_output = [
            buoyname for buoyname in str_output if str(year) in buoyname
        ]
    else:
        restrict_output = [
            buoyname for buoyname in str_output
            if len(buoyname) == 5 and functions.is_number(buoyname[0:4])
        ]

    return restrict_output
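With no year given, the listing is reduced to five-character names whose first four characters are numeric. Illustration only (hypothetical directory names, is_number as sketched after Example #1):

names = ['2012J', '2013F', 'README.txt', '']
print([n for n in names if len(n) == 5 and is_number(n[0:4])])   # ['2012J', '2013F']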
Example #13
async def api_search_products(*, search_item, shift_type, specify_filter,
                              order_by, deli_free, love_baby, lowest_price,
                              highest_price, deli_location, turn_type,
                              curr_page):
    error_msg = None
    products = []
    keywordlist = ['UNIQLO', 'SUPERME', 'NIKE', 'ADIDAS', 'APPLE', 'HUAWEI']
    producttypelist = ['shoes', 'clothes', 'electronic']
    page_num = None
    new_curr_page = 1

    if search_item not in keywordlist and search_item not in producttypelist and search_item != 'all':
        error_msg = 'Please input limited search conditions'
    else:
        if order_by == 'None' or order_by == '':
            order_by = 'product_sales_qty desc'
        where_clause = ''
        '''if search_item == 'all':
            parameter_list = [['UNIQLO'], ['APPLE'], ['NIKE'], ['SUPERME'], ['HUAWEI'], ['ADIDAS']]
            backup_parameter_list = [['UNIQLO'], ['APPLE'], ['NIKE'], ['SUPERME'], ['HUAWEI'], ['ADIDAS']]
            where_clause = where_clause + 'keyword=?'
            if shift_type == 'tmall':
                where_clause = where_clause + ' and istmall=?'
                for k in range(6):
                    parameter_list[k].extend('1')
                    backup_parameter_list[k].extend('1')
            if deli_free != '':
                where_clause = where_clause + ' and title like ?'
                for k in range(6):
                    parameter_list[k].extend(['%' + deli_free + '%'])
                    backup_parameter_list[k].extend(['%' + deli_free + '%'])
            if love_baby != '':
                where_clause = where_clause + ' and (iconkey1 = ? or iconkey2 = ? or iconkey3 = ? or iconkey4 = ? or iconkey5 = ?)'
                for k in range(6):
                    parameter_list[k].extend(['icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei'])
                    backup_parameter_list[k].extend(['icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei','icon-fest-gongyibaobei'])
            if lowest_price != '':
                if not is_number(lowest_price):
                    error_msg = 'Please input price only include the number!'
                    return dict(products=products, error_msg=error_msg)
                else:
                    lowest_price = float(lowest_price)
                where_clause = where_clause + ' and product_price_float >= ?'
                for k in range(6):
                    parameter_list[k].extend([lowest_price])
                    backup_parameter_list[k].extend([lowest_price])
            if highest_price != '':
                if not is_number(highest_price):
                    error_msg = 'Please input price only include the number!'
                    return dict(products=products, error_msg=error_msg)
                else:
                    highest_price = float(highest_price)
                where_clause = where_clause + ' and product_price_float <= ?'
                for k in range(6):
                    parameter_list[k].extend([highest_price])
                    backup_parameter_list[k].extend([highest_price])
            if deli_location != '':
                where_clause = where_clause + ' and shopaddress like ?'
                for k in range(6):
                    parameter_list[k].extend(['%' + deli_location + '%'])
                    backup_parameter_list[k].extend(['%' + deli_location + '%'])
            for i in range(4):
                if not operator.eq(parameter_list,backup_parameter_list):  # judgment whether the parameter_list changed as the limit parameter can join in
                    parameter_list = copy.deepcopy(backup_parameter_list)  # must use deep copy because both the two  ram address ref to one value
                for j in range(6):
                    products_temp = await TaobaoProducts.findAll(where_clause, parameter_list[j],
                                                                 orderBy=order_by, limit=(2 * i, 2))
                    products = products + products_temp

        else:'''
        parameter = []
        first_spe_filter, second_spe_filter = analysis_specify_filter(
            specify_filter)
        if search_item in keywordlist:
            where_clause = where_clause + 'keyword=?'
            parameter.extend([search_item])
        elif search_item in producttypelist:
            where_clause = where_clause + 'product_type=?'
            parameter.extend([search_item])
        else:
            pass
        if shift_type == 'tmall':
            where_clause = where_clause + (' and istmall=?' if
                                           where_clause != '' else 'istmall=?')
            parameter.extend('1')
        if first_spe_filter != '':
            where_clause = where_clause + (
                ' and title like ?' if where_clause != '' else 'title like ?')
            parameter.extend(['%' + first_spe_filter + '%'])
        if second_spe_filter != '':
            where_clause = where_clause + (
                ' and title like ?' if where_clause != '' else 'title like ?')
            parameter.extend(['%' + second_spe_filter + '%'])
        if deli_free != '':
            where_clause = where_clause + (
                ' and title like ?' if where_clause != '' else 'title like ?')
            parameter.extend(['%' + deli_free + '%'])
        if love_baby != '':
            where_clause = where_clause + (
                ' and (iconkey1 = ? or iconkey2 = ? or iconkey3 = ? or iconkey4 = ? or iconkey5 = ?)'
                if where_clause != '' else
                '(iconkey1 = ? or iconkey2 = ? or iconkey3 = ? or iconkey4 = ? or iconkey5 = ?)'
            )
            parameter.extend([
                'icon-fest-gongyibaobei', 'icon-fest-gongyibaobei',
                'icon-fest-gongyibaobei', 'icon-fest-gongyibaobei',
                'icon-fest-gongyibaobei'
            ])
        if lowest_price != '':
            if not is_number(lowest_price):
                error_msg = 'Please input price only include the number!'
                return dict(products=products, error_msg=error_msg)
            else:
                lowest_price = float(lowest_price)
            where_clause = where_clause + (' and product_price_float >= ?'
                                           if where_clause != '' else
                                           'product_price_float >= ?')
            parameter.extend([lowest_price])
        if highest_price != '':
            if not is_number(highest_price):
                error_msg = 'Please input price only include the number!'
                return dict(products=products, error_msg=error_msg)
            else:
                highest_price = float(highest_price)
            where_clause = where_clause + (' and product_price_float <= ?'
                                           if where_clause != '' else
                                           'product_price_float <= ?')
            parameter.extend([highest_price])
        if deli_location != '':
            where_clause = where_clause + (' and shopaddress like ?'
                                           if where_clause != '' else
                                           'shopaddress like ?')
            parameter.extend(['%' + deli_location + '%'])
        total_num = await TaobaoProducts.findNumber('count(id)', where_clause,
                                                    parameter)
        page_num = total_num // 48 + (1 if total_num % 48 > 0 else 0)
        if turn_type != '':
            if turn_type == '1':
                new_curr_page = 1
            elif turn_type == '2':
                new_curr_page = 2
            elif turn_type == '3':
                new_curr_page = 3
            elif turn_type == 'last':
                new_curr_page = page_num
            else:
                int_curr_page = int(curr_page)
                if turn_type == 'pre':
                    new_curr_page = int_curr_page - 1
                else:
                    new_curr_page = int_curr_page + 1
        products = await TaobaoProducts.findAll(
            where_clause,
            parameter,
            orderBy=order_by,
            limit=(48 * (new_curr_page - 1), 48))
    return dict(products=products,
                error_msg=error_msg,
                page_num=page_num,
                curr_page=new_curr_page)
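The where_clause/parameter pair is assembled one condition at a time and then handed to TaobaoProducts.findAll, which appears to bind the '?' placeholders positionally. A compact rerun of the same pattern, for illustration only:

# Searching 'NIKE' on tmall with a price floor of 150 (hypothetical values).
where_clause, parameter = '', []
for cond, value in [('keyword=?', 'NIKE'),
                    ('istmall=?', '1'),
                    ('product_price_float >= ?', 150.0)]:
    where_clause += (' and ' + cond) if where_clause else cond
    parameter.append(value)
print(where_clause)   # keyword=? and istmall=? and product_price_float >= ?
print(parameter)      # ['NIKE', '1', 150.0]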
Example #14
def calc_RV(RV_out,file_name):
    ### Find the columns related
    related = []
    file_name = string.split(file_name,".")[0]
    for i in range(len(RV_out)):
        row_name = RV_out[i][1]
        row_name = string.split(row_name,".")[0]
        row_name = string.split(row_name,"normspec_")[1]
        #print file_name,row_name
        if file_name == row_name:
            related.append(RV_out[i])

    RV_table = []
    for i in range(len(related)):
        print related[i]
        JD = related[i][3]
        stellar_height = related[i][5]
        vhelio = related[i][7]
        vhelio_err = related[i][8]
        aperture = int(related[i][4]) - 1
        print aperture_weights,aperture

        flux_weight = aperture_weights[aperture]

        if functions.is_number(stellar_height) and functions.is_number(JD) and functions.is_number(vhelio) and functions.is_number(vhelio_err):
            if stellar_height > 0.20 and abs(vhelio) < 500: ### NORMAL
            #if stellar_height > 0.0: ### Manual
                RV_table.append([JD,vhelio,vhelio_err,stellar_height,flux_weight])

    if RV_table == []:
        print "Lowering ccf_height threshold"
        ### Lower the bar on ccf_height
        for i in range(len(related)):
            print related[i]
            JD = related[i][3]
            stellar_height = related[i][5]
            vhelio = related[i][7]
            vhelio_err = related[i][8]
            aperture = int(related[i][4]) - 1
            flux_weight = aperture_weights[aperture]

            if functions.is_number(stellar_height) and functions.is_number(JD) and functions.is_number(vhelio) and functions.is_number(vhelio_err):
                if stellar_height > 0.01 and abs(vhelio) < 500: ### NORMAL
                #if stellar_height > 0.0: ### Manual
                    RV_table.append([JD,vhelio,vhelio_err,stellar_height,flux_weight])

    if RV_table == []:
        ### If it still doesn't work, return INDEF
        return ["INDEF","INDEF","INDEF","INDEF"]

    else:

        ### Use (fxcor telluric error + telluric height + stellar error +
        ### stellar height) * (1/flux_weight) as weights
        ### For final velocity calculations

        JD = array(transpose(RV_table)[0])
        vhelio = array(transpose(RV_table)[1])
        vhelio_err = array(transpose(RV_table)[2])
        stellar_height = array(transpose(RV_table)[3])
        flux_weights = array(transpose(RV_table)[4])

        fxcor_err = average(vhelio_err)

        if JD == []:
            return ["INDEF","INDEF","INDEF"]
        if len(JD) == 1:
            return JD[0],vhelio[0],vhelio_err[0],stellar_height[0]
        if len(JD) == 2:
            velocity = vhelio
            weights = (1/flux_weights) * (vhelio_err / sum(vhelio_err) + (1/stellar_height) / sum(1/stellar_height))
            return JD[0],waverage(velocity,weights),average(vhelio_err),average(stellar_height)
        if len(JD) >= 3:
            v_list = []
            err_list = []
            stellar_height_list = []
            flux_weights_list = []
            for i in range(len(vhelio)):
                if (abs(vhelio[i] - median(vhelio)) < 10.0) and (vhelio_err[i] < 10.0):
                    v_list.append(vhelio[i])
                    err_list.append(vhelio_err[i])
                    stellar_height_list.append(stellar_height[i])
                    flux_weights_list.append(flux_weights[i])

            if len(v_list) == 0:
                v_list = []
                err_list = []
                stellar_height_list = []
                flux_weights_list = []
                for i in range(len(vhelio)):
                    if (abs(vhelio[i] - median(vhelio)) < 50.0) and (vhelio_err[i] < 50.0):
                        v_list.append(vhelio[i])
                        err_list.append(vhelio_err[i])
                        stellar_height_list.append(stellar_height[i])
                        flux_weights_list.append(flux_weights[i])

            err_list = array(err_list)
            stellar_height_list = array(stellar_height_list)
            flux_weights_list = array(flux_weights_list)

            poisson_factor = (1/flux_weights_list) * (err_list/sum(err_list) +(1/stellar_height_list)/sum(1/stellar_height_list))
            poisson_factor = 1 / poisson_factor
            poisson_factor = poisson_factor / max(poisson_factor)
            poisson_factor = sqrt(sum(poisson_factor)/nstandards)

            v_err = std(v_list)
            velocity = waverage(v_list,(1/flux_weights_list) * (err_list/sum(err_list) + (1/stellar_height_list)/sum(1/stellar_height_list)))
            error = sqrt(waverage(err_list,1/flux_weights_list)**2 + v_err**2)/poisson_factor 
            return JD[0],velocity,error,average(stellar_height_list)
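waverage is another helper that is not shown; from its use above it is presumably a weighted mean, roughly:

def waverage(values, weights):
    # Hypothetical sketch of waverage: weighted arithmetic mean.
    return sum(v * w for v, w in zip(values, weights)) / sum(weights)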
Example #15
        logging.debug("waiting on values of variables...")
        for var in variables_name:
            x = input('- ' + var + ' >: ')

            if x == '':
                break

            else:
                variables.append(x)

        if len(variables) < len(variables_name):
            logging.debug("procedure is aborted")
            print("Adding procedure is aborted")

        else:
            while functions.is_number(variables[1]) == False:
                print("Value of quantity is not a number!\nPleas fix it")
                variables[1] = input(">: ")

            while functions.is_number(variables[3]) == False:
                print("Value of price is not a number!\nPleas fix it")
                variables[3] = input(">: ")

            if functions.is_in_list(variables[0], items) == True:
                print(
                    "Item is already in warehouse!\nIf you want change item status use 'update item ...'"
                )

            elif functions.confirm_choice() == True:
                logging.debug(f"adding {variables[0]}\n")
                x = functions.add_item(variables[0], variables[1],
Example #16
    def styleText(self, start, end):
        """
        Overloaded method for styling text.
        NOTE:
            Very slow if done in Python!
            Using the Cython version is better.
            The fastest would probably be adding the lexer directly into
            the QScintilla source. Maybe never :-)
        """
        #Style in pure Python, VERY SLOW!
        editor = self.editor()
        if editor is None:
            return
        #Initialize the styling
        self.startStyling(start)
        #Scintilla works with bytes, so we have to adjust the start and end boundaries
        text = bytearray(editor.text().lower(), "utf-8")[start:end].decode("utf-8")
        #Loop optimizations
        setStyling      = self.setStyling
        basic_kw_list   = self.basic_keyword_list
        user_kw_list    = self.user_keyword_list
        def_kw_list     = self.def_keyword_list
        top_kw_list     = self.top_keyword_list
        unsafe_kw_list  = self.unsafe_keyword_list
        operator_list   = self.operator_list
        keyword_operator_list = self.keyword_operator_list
        type_kw_list    = self.type_keyword_list
        DEF     = self.styles["Default"]
        B_KWD   = self.styles["BasicKeyword"]
        T_KWD   = self.styles["TopKeyword"]
        COM     = self.styles["Comment"]
        STR     = self.styles["String"]
        L_STR   = self.styles["LongString"]
        NUM     = self.styles["Number"]
        MAC     = self.styles["Pragma"]
        OPE     = self.styles["Operator"]
        UNS     = self.styles["Unsafe"]
        TYP     = self.styles["Type"]
        D_COM   = self.styles["DocumentationComment"]
        DEFIN   = self.styles["Definition"]
        CLS     = self.styles["Class"]
        KOP     = self.styles["KeywordOperator"]
        CHAR    = self.styles["CharLiteral"]
        OF      = self.styles["CaseOf"]
        U_KWD   = self.styles["UserKeyword"]
        M_COM   = self.styles["MultilineComment"]
        M_DOC   = self.styles["MultilineDocumentation"]
        #Initialize various states and split the text into tokens
        commenting          = False
        doc_commenting      = False
        multi_doc_commenting= False
        new_commenting      = False
        stringing           = False
        long_stringing      = False
        char_literal        = False
        pragmaing           = False
        case_of             = False
        cls_descrition      = False
        tokens = [(token, len(bytearray(token, "utf-8"))) for token in self.splitter.findall(text)]
        #Check if there is a style(comment, string, ...) stretching on from the previous line
        if start != 0:
            previous_style = editor.SendScintilla(editor.SCI_GETSTYLEAT, start - 1)
            if previous_style == L_STR:
                long_stringing = True
            elif previous_style == MAC:
                pragmaing = True
            elif previous_style == M_COM:
                new_commenting = True
            elif previous_style == M_DOC:
                multi_doc_commenting = True
        #Style the tokens accordingly
        for i, token in enumerate(tokens):
#                print(str(token) + "  " + str(i))
            if commenting == True:
                #Continuation of comment
                setStyling(token[1], COM)
                #Check if comment ends
                if "\n" in token[0]:
                    commenting = False
            elif doc_commenting == True:
                #Continuation of comment
                setStyling(token[1], D_COM)
                #Check if comment ends
                if "\n" in token[0]:
                    doc_commenting = False
            elif new_commenting == True:
                #Continuation of comment
                setStyling(token[1], M_COM)
                #Check if comment ends
                if "#" in token[0] and "]" in tokens[i-1][0]:
                    new_commenting = False
            elif multi_doc_commenting == True:
                #Continuation of comment
                setStyling(token[1], M_DOC)
                #Check if comment ends
                if "#" in token[0] and "#" in tokens[i-1][0] and "]" in tokens[i-2][0]:
                    multi_doc_commenting = False
            elif stringing == True:
                #Continuation of a string
                setStyling(token[1], STR)
                #Check if string ends
                if token[0] == "\"" and (tokens[i-1][0] != "\\") or "\n" in token[0]:
                    stringing = False
            elif long_stringing == True:
                #Continuation of a string
                setStyling(token[1], L_STR)
                #Check if string ends
                if token[0] == "\"\"\"":
                    long_stringing = False
            elif char_literal == True:
                #Check if string ends
                if ("\n" in token[0] or 
                    " " in token[0] or
                    "(" in token[0] or
                    ")" in token[0] or
                    "," in token[0] or
                    token[0] in operator_list):
                    #Do not color the separator
                    setStyling(token[1], DEF)
                    char_literal = False
                elif token[0] == "'":
                    #Continuation of a character
                    setStyling(token[1], CHAR)
                    char_literal = False
                else:
                    setStyling(token[1], CHAR)
            elif pragmaing == True:
                #Continuation of a string
                setStyling(token[1], MAC)
                #Check if string ends
                if token[0] == ".}":
                    pragmaing = False
            elif case_of == True:
                #'Case of' parameter
                if token[0] == ":" or "\n" in token[0]:
                    setStyling(token[1], DEF)
                    case_of = False
                else:
                    setStyling(token[1], OF)
            elif cls_descrition == True:
                #Class/namespace description
                if token[0] == ":" or "\n" in token[0]:
                    setStyling(token[1], DEF)
                    cls_descrition = False
                else:
                    setStyling(token[1], CLS)
            elif token[0] == "\"\"\"":
                #Start of a multi line (long) string
                setStyling(token[1], L_STR)
                long_stringing = True
            elif token[0] == "{.":
                #Start of a multi line (long) string
                setStyling(token[1], MAC)
                pragmaing = True
            elif token[0] == "\"":
                #Start of a string
                setStyling(token[1], STR)
                stringing = True
            elif token[0] == "'":
                #Start of a string
                setStyling(token[1], CHAR)
                char_literal = True
            elif token[0] in basic_kw_list:
                #Basic keyword
                setStyling(token[1], B_KWD)
                try:
                    if ((token[0] == "of" and "\n" in tokens[i-2][0]) or
                        ((token[0] == "of" and "\n" in tokens[i-1][0]))):
                        #Start of a CASE
                        case_of = True
                except IndexError:
                    case_of = False
            elif token[0] in user_kw_list:
                #User keyword
                setStyling(token[1], U_KWD)
            elif token[0] in top_kw_list:
                #Top keyword
                setStyling(token[1], T_KWD)
            elif token[0] in unsafe_kw_list:
                #Unsafe/danger keyword
                setStyling(token[1], UNS)
            elif token[0] in operator_list:
                #Operator
                setStyling(token[1], OPE)
            elif token[0] in keyword_operator_list:
                #Operator
                setStyling(token[1], KOP)
            elif token[0] in type_kw_list:
                #Operator
                setStyling(token[1], TYP)
            elif token[0] == "#":
                #Start of a comment or documentation comment
                if len(tokens) > i+2 and tokens[i+1][0] == "#" and tokens[i+2][0] == "[":
                    setStyling(token[1], M_DOC)
                    multi_doc_commenting = True
                elif len(tokens) > i+1 and tokens[i+1][0] == "#":
                    setStyling(token[1], D_COM)
                    doc_commenting = True
                elif len(tokens) > i+1 and tokens[i+1][0] == "[":
                    setStyling(token[1], M_COM)
                    new_commenting = True
                else:
                    setStyling(token[1], COM)
                    commenting = True
            elif (i > 1) and (("\n" in tokens[i-2][0]) or ("  " in tokens[i-2][0])) and (tokens[i-1][0] == "of"):
                #Case of statement
                case_of = True
                setStyling(token[1], OF)
            elif functions.is_number(token[0][0]):
                #Number
                #Check only the first character, because Nim has those weird constants e.g.: 12u8, ...)
                setStyling(token[1], NUM)
            elif ((i > 1) and (tokens[i-2][0] in user_kw_list) and token[0][0].isalpha()):
                #Class-like definition
                setStyling(token[1], CLS)
                cls_descrition = True
            elif (((i > 1) and (tokens[i-2][0] in def_kw_list and tokens[i-1][0] != "(") and token[0][0].isalpha()) or
                    ((i > 2) and (tokens[i-3][0] in def_kw_list and tokens[i-1][0] == '`') and token[0][0].isalpha())):
                #Proc-like definition
                setStyling(token[1], DEFIN)
            else:
                setStyling(token[1], DEF)
Example #17
 def styleText(self, start, end):
     """
     Overloaded method for styling text.
     NOTE:
         Very slow if done in Python!
         Using the Cython version is better.
         The fastest would probably be adding the lexer directly into
         the QScintilla source. Maybe never :-)
     """
     #Get the global cython flag
     if lexers.cython_lexers_found == True:
         #Cython module found
         lexers.cython_lexers.style_oberon(start, end, self, self.editor())
     else:
         #Style in pure Python, VERY SLOW!
         editor = self.editor()
         if editor is None:
             return
         #Initialize the styling
         self.startStyling(start)
         #Scintilla works with bytes, so we have to adjust the start and end boundaries
         text = bytearray(editor.text(), "utf-8")[start:end].decode("utf-8")
         #Loop optimizations
         setStyling = self.setStyling
         kw_list = self.keyword_list
         types_list = self.types_list
         DEF = self.styles["Default"]
         KWD = self.styles["Keyword"]
         COM = self.styles["Comment"]
         STR = self.styles["String"]
         PRO = self.styles["Procedure"]
         MOD = self.styles["Module"]
         NUM = self.styles["Number"]
         TYP = self.styles["Type"]
         #Initialize comment state and split the text into tokens
         commenting = False
         stringing = False
         tokens = [(token, len(bytearray(token, "utf-8")))
                   for token in self.splitter.findall(text)]
         #Check if there is a style(comment, string, ...) stretching on from the previous line
         if start != 0:
             previous_style = editor.SendScintilla(editor.SCI_GETSTYLEAT,
                                                   start - 1)
             if previous_style == COM:
                 commenting = True
         #Style the tokens accordingly
         for i, token in enumerate(tokens):
             if commenting == True:
                 #Continuation of comment
                 setStyling(token[1], COM)
                 #Check if comment ends
                 if token[0] == "*)":
                     commenting = False
             elif stringing == True:
                 #Continuation of a string
                 setStyling(token[1], STR)
                 #Check if string ends
                 if token[0] == "\"" or "\n" in token[0]:
                     stringing = False
             elif token[0] == "\"":
                 #Start of a string
                 setStyling(token[1], STR)
                 stringing = True
             elif token[0] in kw_list:
                 #Keyword
                 setStyling(token[1], KWD)
             elif token[0] in types_list:
                 #Keyword
                 setStyling(token[1], TYP)
             elif token[0] == "(*":
                 #Start of a comment
                 setStyling(token[1], COM)
                 commenting = True
             elif i > 1 and tokens[i - 2][0] == "PROCEDURE":
                 #Procedure name
                 setStyling(token[1], PRO)
             elif i > 1 and tokens[i - 2][0] == "MODULE":
                 #Module name (beginning)
                 setStyling(token[1], MOD)
             elif (i > 1 and tokens[i - 2][0]
                   == "END") and (len(tokens) - 1 >= i + 1):
                 #Module or procedure name (name)
                 if ";" in tokens[i + 1][0]:
                     #Procedure end
                     setStyling(token[1], PRO)
                 elif "." in tokens[i + 1][0]:
                     #Module end
                     setStyling(token[1], MOD)
                 else:
                     setStyling(token[1], DEF)
             elif functions.is_number(token[0]):
                 #Number
                 setStyling(token[1], NUM)
             else:
                 setStyling(token[1], DEF)
Example #18
 def styleText(self, start, end):
     """
     Overloaded method for styling text.
     NOTE:
         Very slow if done in Python!
         Using the Cython version is better.
         The fastest would probably be adding the lexer directly into
         the QScintilla source. Maybe never :-)
     """
     #Get the global cython flag
     if lexers.cython_lexers_found == True:
         #Cython module found
         lexers.cython_lexers.style_ada(start, end, self, self.editor())
     else:
         #Style in pure Python, VERY SLOW!
         editor = self.editor()
         if editor is None:
             return
         #Initialize the procedure/package counter
         pp_counter = []
         #Initialize the styling
         self.startStyling(0)
         #Scintilla works with bytes, so we have to adjust the start and end boundaries
         text = bytearray(editor.text().lower(), "utf-8").decode("utf-8")
         #Loop optimizations
         setStyling = self.setStyling
         kw_list = self.keyword_list
         DEF = self.styles["Default"]
         KWD = self.styles["Keyword"]
         COM = self.styles["Comment"]
         STR = self.styles["String"]
         PRO = self.styles["Procedure"]
         NUM = self.styles["Number"]
         PAC = self.styles["Package"]
         #            TYP = self.styles["Type"]
         #Initialize comment state and split the text into tokens
         commenting = False
         stringing = False
         tokens = [(token, len(bytearray(token, "utf-8")))
                   for token in self.splitter.findall(text)]
         #Style the tokens accordingly
         for i, token in enumerate(tokens):
             if commenting == True:
                 #Continuation of comment
                 setStyling(token[1], COM)
                 #Check if comment ends
                 if "\n" in token[0]:
                     commenting = False
             elif stringing == True:
                 #Continuation of a string
                 setStyling(token[1], STR)
                 #Check if string ends
                 if token[0] == "\"" or "\n" in token[0]:
                     stringing = False
             elif token[0] == "\"":
                 #Start of a string
                 setStyling(token[1], STR)
                 stringing = True
             elif token[0] in kw_list:
                 #Keyword
                 setStyling(token[1], KWD)
             elif token[0] == "--":
                 #Start of a comment
                 setStyling(token[1], COM)
                 commenting = True
             elif i > 1 and tokens[i - 2][0] == "procedure":
                 #Procedure name
                 setStyling(token[1], PRO)
                 #Mark the procedure
                 if tokens[i + 1][0] != ";":
                     pp_counter.append("PROCEDURE")
             elif i > 1 and (tokens[i - 2][0] == "package"
                             or tokens[i - 2][0] == "body"):
                 #Package name
                 setStyling(token[1], PAC)
                 #Mark the package
                 pp_counter.append("PACKAGE")
             elif (i > 1 and tokens[i - 2][0]
                   == "end") and (len(tokens) - 1 >= i + 1):
                 #Package or procedure name end
                 if len(pp_counter) > 0:
                     if pp_counter.pop() == "PACKAGE":
                         setStyling(token[1], PAC)
                     else:
                         setStyling(token[1], PRO)
                 else:
                     setStyling(token[1], DEF)
             elif functions.is_number(token[0]):
                 #Number
                 setStyling(token[1], NUM)
             else:
                 setStyling(token[1], DEF)
Example #19
import os
import functions as tf
from timer import Timer
from settings import Settings

timer = Timer()
timer_settings = Settings()

print(timer_settings.welcome_message)

while True:

    user_input = input('\nMenu: ')

    # Shutdown mode
    if tf.is_time(user_input) or tf.is_number(user_input):
        timer.start(user_input, '-s')

    # Reboot Mode
    elif user_input.lower() in timer_settings.reboot_commands:
        os.system('cls')
        print(timer_settings.reboot_menu)
        user_input = input('\nReboot mode: ')
        if tf.is_number(user_input) or tf.is_time(user_input):
            timer.start(user_input, '-r')
            tf.menu_return(2)
        else:
            tf.menu_return(0.5)

    # Print remaining time
    elif user_input.lower() in timer_settings.r_time_commands:
Example #20
#region = "*"
#region = "a5700-6100"
region = "a5250-6815"
normalise(file_name)

run_fxcor("temp.fits","mdwarf_template_norm.fits",region,"fxcor_stellar",0,False)
os.system("cat fxcor_stellar.txt")

### Now calculate RV
data = functions.read_ascii("fxcor_stellar.txt")
data = functions.read_table(data)

rv = []
rverr = []
for i in data:
    if functions.is_number(i[3]):
        hjd = i[3]+50000

    if functions.is_number(i[12]):
        if abs(i[12]) < 500 and abs(i[13]) < 500:
            rv.append(i[12])
            rverr.append(i[13])
    
RV = median(rv)
RV_err = median(rverr)

print "!!!!!!!!!!!"
print "RV",RV,"RVERR",RV_err

### Update database
import mysql_insert
Example #21
        type = "E(B-V)",\
        apertures = "*",\
        override = 1,\
        uncorrect = 0,\
        mode = "al")

    ### Create .dat file out of fits file redden_name
    
    os.system("rm " + redden_name + ".dat")

    iraf.wspectext(redden_name + "[*,1,1]", redden_name + ".dat")

    spectrum = functions.read_ascii(redden_name + ".dat")
    spectrum = functions.read_table(spectrum)
    temp = []
    for i in spectrum:
        if len(i) == 2:
            if functions.is_number(i[0]):
                temp.append(i)
    spectrum = temp
    spectrum = spectrum[1:len(spectrum)-2]

    output_spectrum = open(redden_name + ".dat","w")
    functions.write_table(spectrum,output_spectrum)
    output_spectrum.close()

    os.system("mv " + redden_name + ".dat deredden")
    os.system("mv " + redden_name + " deredden")

    redden = redden + redden_step
Example #22
def main(sc):
    """
    Read GDELT data from S3, select columns, join tables,
    and perform calculations with grouped themes and document
    times
    """

    #Obtain taxonomy dictionary and broadcast to the workers
    tax_file = os.environ['TAX_LIST_FILE']
    tax_list = f.read_tax_file(tax_file)
    rdd_tax_list = sc.broadcast(tax_list)

    #Obtain list of top 500 themes used for filtering
    theme_file = os.environ['THEME_LIST_FILE']
    theme_list = f.read_theme_file(theme_file)
    rdd_theme_list = sc.broadcast(theme_list)

    #Obtain list of top news sources used for filtering
    src_file = os.environ['SRC_LIST_FILE']
    src_list = f.read_src_file(src_file)
    rdd_src_list = sc.broadcast(src_list)


    #Read "mentions" table from GDELT S3 bucket. Transform into RDD
    mentionRDD = sc.textFile('s3a://gdelt-open-data/v2/mentions/*.mentions.csv')
    mentionRDD = mentionRDD.map(lambda x: x.encode("utf", "ignore"))
    mentionRDD = mentionRDD.map(lambda x : x.split('\t'))
    mentionRDD = mentionRDD.filter(lambda x: len(x)==16)
    mentionRDD = mentionRDD.filter(lambda x: f.is_not_empty([x[2], x[5], x[13]]))
    mentionRDD = mentionRDD.filter(lambda x: f.is_number(x[13])) 
    mentionRowRDD = mentionRDD.map(lambda x : Row(
                                        mention_id = x[5],
                                        mention_doc_tone = float(x[13]),
                                        mention_time_date = f.transform_to_timestamptz_daily(x[2])
					))
 
    #Read "GKG" table from GDELT S3 bucket. Transform into RDD
    gkgRDD = sc.textFile('s3a://gdelt-open-data/v2/gkg/YEARMONTH*0000.gkg.csv')
    gkgRDD = gkgRDD.map(lambda x: x.encode("utf", "ignore"))
    gkgRDD = gkgRDD.map(lambda x: x.split('\t'))
    gkgRDD = gkgRDD.filter(lambda x: len(x)==27)   
    gkgRDD = gkgRDD.filter(lambda x: f.is_not_empty([x[3], x[4], x[7]]))
    gkgRowRDD = gkgRDD.map(lambda x : Row(src_common_name = x[3],
                                        doc_id = x[4],
                                        themes = f.clean_taxonomy(x[7].split(';')[:-1], rdd_tax_list)
                                        ))


    sqlContext = SQLContext(sc)

    #Transform RDDs to dataframes
    mentionDF = sqlContext.createDataFrame(mentionRowRDD)
    gkgDF     = sqlContext.createDataFrame(gkgRowRDD)


    df1 = mentionDF.alias('df1')
    df2 = gkgDF.alias('df2')

    #Themes and tones information are stored in two different tables
    joinedDF = df1.join(df2, df1.mention_id == df2.doc_id, "inner").select('df1.*'
                                                , 'df2.src_common_name','df2.themes').repartition(2000)

    #Each document could contain multiple themes. Explode on the themes and make a new column on filtered themes
    explodedDF = joinedDF.select('mention_id'
				, 'mention_doc_tone'
                                , 'mention_time_date'
				, 'src_common_name'
                                , explode(joinedDF.themes).alias("theme")) \
                                .filter(col('theme').isin(*(rdd_theme_list.value)))

    hist_data_udf = udf(f.hist_data, ArrayType(IntegerType()))
    get_quantile_udf = udf(f.get_quantile, ArrayType(FloatType()))
    
    #Compute statistics for each theme at a time
    explodedDF.cache()
    
    #Over all sources
    testDF1 = explodedDF.groupBy('theme', 'mention_time_date').agg(
            count('*').alias('num_mentions'),
            avg('mention_doc_tone').alias('avg'),
            collect_list('mention_doc_tone').alias('tones')
            )
    
    #For each source 
    testDF2 = explodedDF.groupBy('theme', 'mention_time_date', 'src_common_name').agg(
            count('*').alias('num_mentions'),
            avg('mention_doc_tone').alias('avg'),
            collect_list('mention_doc_tone').alias('tones')
            ).repartition(2000)
    
    #Histogram and compute quantiles for tones
    
    histDF1 = testDF1.withColumn("bin_vals", hist_data_udf('tones')) \
                   .withColumn("quantiles", get_quantile_udf('tones'))
    
    histDF2 = testDF2.withColumn("bin_vals", hist_data_udf('tones')) \
                   .withColumn("quantiles", get_quantile_udf('tones'))
   
    
    finalDF1 = histDF1.select('theme', 'num_mentions', 'avg', 'quantiles', 'bin_vals', col('mention_time_date').alias('time'))
    #Filter sources
    finalDF2 = histDF2.select('theme', 'src_common_name', 'num_mentions', 'avg', 'quantiles', 'bin_vals', 
            col('mention_time_date').alias('time')).filter(col('src_common_name').isin(*(rdd_src_list.value)))
    
    
    
    #Preparing to write to TimescaleDB
    #First write to group-by-src table
    
    db_properties = {}
    config = configparser.ConfigParser()
    
    config.read("db_properties.ini")
    db_prop = config['postgresql']
    db_url = db_prop['url']
    db_properties['username'] = db_prop['username']
    db_properties['password'] = db_prop['password']
    db_properties['url'] = db_prop['url']
    db_properties['driver'] = db_prop['driver']

    #Write to table
    finalDF1.write.format("jdbc").options(
    url=db_properties['url'],
    dbtable='bubblebreaker_schema.tones_table_v3',
    user='******',
    password='******',
    stringtype="unspecified"
    ).mode('append').save()
    
    #Then write to per-src table

    config.read("db_properties_src.ini")
    db_prop = config['postgresql']
    db_url = db_prop['url']
    db_properties['username'] = db_prop['username']
    db_properties['password'] = db_prop['password']
    db_properties['url'] = db_prop['url']
    db_properties['driver'] = db_prop['driver']

    #Write to table
    finalDF2.write.format("jdbc").options(
    url=db_properties['url'],
    dbtable='bubblebreaker_src_schema.tones_table_v2',
    user='******',
    password='******',
    stringtype="unspecified"
    ).mode('append').save()
Example #23
    def read(self, data_file, varname):
        '''Given an IMB source file, reads the data into a data series object'''

        import csv
        import linekey
        import functions

        key = linekey.get_linekey(data_file, [varname], self.name)

        vscale_vars = [
            'surface', 'interface', 'bottom', 'snow depth', 'ice thickness'
        ]

        if (key is None or key.value_index.count(-1) > 0):
            print('Could not find variable ' + varname)
            self.data_list = {}
            return None

        fileh = open(data_file)
        rows = csv.reader(fileh)

        for row in rows:
            if len(row) > 0:
                date_string = row[key.date_index]
                date = process_dates(date_string, self.name, self.fen)

                if (date is not None):

                    if key.value_index[0] < len(row):
                        value_string = row[key.value_index[0]]
                        if functions.is_number(value_string):
                            value = float(value_string)

                            if (key.lat_flip_ns[0] and key.lat_flip_ns[1]
                                    == key.value_index[0]):
                                ns_value = row[key.lat_flip_ns[2]]
                                if (ns_value == 'S'):
                                    value = 0. - value

                            if (key.lon_flip_ew[0] and key.lon_flip_ew[1]
                                    == key.value_index[0]):
                                ew_value = row[key.lon_flip_ew[2]]
                                if (ew_value == 'W'):
                                    value = 0. - value

                            if key.fliplon and varname == 'longitude':
                                value = 0. - value

                            if varname in vscale_vars:
                                value = value * key.vertical_scale

                            self.data_list[date] = value

                        else:
                            if varname in ['latitude', 'longitude']:

                                first_part = value_string[:-2]
                                second_part = value_string[-2:]
                                if functions.is_number(
                                        first_part) and second_part.strip(
                                        ) in ['N', 'S', 'E', 'W']:
                                    value = float(first_part)
                                    if second_part.strip() in ['S', 'W']:
                                        value = 0. - value
                                    self.data_list[date] = value

        if len(self.data_list) > 0:
            self.type = 'irregular'

        fileh.close()
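The fallback branch handles coordinate cells where the hemisphere letter trails the number. Illustration only, assuming the last two characters are a padded letter such as " S":

value_string = "72.4 S"
first_part, second_part = value_string[:-2], value_string[-2:]
if second_part.strip() in ['N', 'S', 'E', 'W']:
    value = float(first_part)
    if second_part.strip() in ['S', 'W']:
        value = 0. - value
print(value)   # -72.4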
Example #24
    def signingIn(self, Dialog):


        uName = self.uNameLine.text()
        uPassword = self.uPasswordLine.text()
        ip = self.IPline.text()
        data.username = uName
        data.password = uPassword

        client.IP = ip
        if not client.known_user(uName, uPassword):
            self.infLabel.setText("Wrong login/password")
            return


        if len(data.username)!=0 and len(data.userid)!=0 and len(data.password)!=0 and len( client.IP)!=0 and functions.is_number(data.userid):
            print("start")
            try:
                 data.userid = client.get_id(uName)
                 data.balance = (client.get_balance(uName), "$")
                 if self.size.currentText() == "Big":
                     data.scale = [1.14, 1.2, 4.5, 1.4]
                     data.scale_ = [2.1,1.5,4.3,3]
                 functions.putPersonalData()
                 mainWindow.runGUI()
                 #print("singIn")

            except:
                print("Error while starting app")
                self.clearWin(Dialog)

        else:
            self.infLabel.setText("Error. Try Again!")

        return
Example #25
### Find info from the fits header
hdulist = pyfits.open(file_path_reduced+"normspec_" + file_name)
object_name = hdulist[0].header["OBJECT"]
dateobs = hdulist[0].header["DATE-OBS"]
mjd = hdulist[0].header["MJD-OBS"]
exptime = hdulist[0].header["EXPTIME"]
comment = hdulist[0].header["NOTES"]
hdulist.close()

### Read info from text files in reduced/
RV_dat = functions.read_ascii("RV.dat")
RV_dat = functions.read_table(RV_dat)

for entry in RV_dat:
    if entry[0] == object_name and entry[1] == file_name:
        if functions.is_number(entry[2]):
            hjd = entry[2] + 50000
            RV = entry[3]
            RV_err = entry[4]
            ccf_height = entry[5]

ccf_log = functions.read_ascii("ccf_log.txt")
ccf_log = functions.read_table(ccf_log)

ccf_fwhm = 0
bis = 0
bis_err = 0

for entry in ccf_log:
    if entry[0] == file_name and entry[1] == object_name:
        ccf_fwhm = entry[3]