Example #1
File: server.py Project: restran/hydrogen
    def __init__(self):
        tornado_settings = dict(
            debug=False,
            static_path=get_path('static'),
        )

        tornado.web.Application.__init__(self, url_handlers,
                                         **tornado_settings)

        # On macOS, records.Database hangs while creating the database connection; the cause is unknown
        if sys.platform != 'darwin':
            self.database = records.Database('sqlite:///' +
                                             get_path('data/database.db'))
            sql = "select name from sqlite_master where type='table' order by name"
            with self.database.get_connection() as conn:
                rows = conn.query(sql).as_dict()
            name_set = set([t['name'] for t in rows])
            if 'interceptor' not in name_set:
                with self.database.get_connection() as conn:
                    conn.query(SQL_CREATE_TABLE_INTERCEPTOR)

            if 'http_history' not in name_set:
                with self.database.get_connection() as conn:
                    conn.query(SQL_CREATE_TABLE_HTTP_HISTORY)
                    for t in SQL_CREATE_INDEX_HTTP_HISTORY:
                        conn.query(t)
        else:
            self.database = None
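
For comparison, the same "create missing tables" check can be reproduced with only the standard-library sqlite3 module. The schema below is a hypothetical stand-in for SQL_CREATE_TABLE_HTTP_HISTORY and its indexes, not the project's actual DDL.

import sqlite3

conn = sqlite3.connect(':memory:')
rows = conn.execute(
    "select name from sqlite_master where type='table' order by name").fetchall()
name_set = {name for (name,) in rows}
if 'http_history' not in name_set:
    # Hypothetical schema standing in for SQL_CREATE_TABLE_HTTP_HISTORY
    conn.execute("CREATE TABLE http_history (id INTEGER PRIMARY KEY, url TEXT)")
    conn.execute("CREATE INDEX idx_http_history_url ON http_history (url)")
print([t[0] for t in conn.execute(
    "select name from sqlite_master where type='table'")])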
Example #2
    def save_mat_high_fq(self, file_name):
        import scipy.io as sio
        if st.get_path(st.HF_PATH):
            save_path = st.get_path(st.HF_PATH)
        else:
            self.show_custom_message(
                "Warning",
                "Please select a location where you would like your high "
                "frequency matlab data "
                "to be saved. Click save again once selected.")
            return

        for key, value in self.sens_objects.items():
            sl_data = self.sens_objects[key].get_flat_data().copy()
            sl_data = self.remove_9s(sl_data)
            sl_data = sl_data - int(self.sens_objects[key].height)
            time = filt.datenum2(self.sens_objects[key].get_time_vector())
            data_obj = [time, sl_data]
            # transposing the data so that it matches the shape of the UHSLC matlab format
            matlab_obj = {
                'NNNN': file_name + key.lower(),
                file_name + key.lower(): np.transpose(data_obj, (1, 0))
            }
            try:
                sio.savemat(save_path + '/' + file_name + key.lower() + '.mat',
                            matlab_obj)
                self.show_custom_message(
                    "Success", "Success \n" + file_name + key.lower() +
                    '.mat' + " Saved to " + st.get_path(st.HF_PATH) + "\n")
            except IOError as e:
                self.show_custom_message(
                    "Error", "Cannot Save to high frequency (.mat) data to" +
                    st.get_path(st.HF_PATH) + "\n" + str(e) +
                    "\n Please select a different path to save to")
Example #3
    def __init__(self, parent=None):
        super(HelpScreen, self).__init__()

        # Object for data persistence
        # self.settings = QtCore.QSettings('UHSLC', 'com.uhslc.qcsoft')
        # st.SETTINGS.remove("savepath")
        self.ui = parent

        # If a save path hasn't been defined, give it a home directory
        if (st.get_path(st.SAVE_KEY)):
            self.ui.lineEditPath.setPlaceholderText(st.get_path(st.SAVE_KEY))
        else:
            st.SETTINGS.setValue(st.SAVE_KEY, os.path.expanduser('~'))
            self.ui.lineEditPath.setPlaceholderText(os.path.expanduser('~'))

        self.ui.lineEditLoadPath.setPlaceholderText(st.get_path(st.LOAD_KEY))

        # If a fast delivery save path hasn't been defined, give it a home directory
        if (st.get_path(st.FD_PATH)):
            self.ui.lineEditFDPath.setPlaceholderText(st.get_path(st.FD_PATH))
        else:
            st.SETTINGS.setValue(st.FD_PATH, os.path.expanduser('~'))
            self.ui.lineEditFDPath.setPlaceholderText(os.path.expanduser('~'))

        # If a high frequency data save path hasn't been defined, give it a home directory
        if (st.get_path(st.HF_PATH)):
            self.ui.lineEditHFPath.setPlaceholderText(st.get_path(st.HF_PATH))
        else:
            st.SETTINGS.setValue(st.HF_PATH, os.path.expanduser('~'))
            self.ui.lineEditHFPath.setPlaceholderText(os.path.expanduser('~'))

        if st.get_path(st.DIN_PATH):
            self.ui.lineEdit_din.setPlaceholderText(st.get_path(st.DIN_PATH))

        saveButton = self.ui.pushButton_save_folder
        loadButton = self.ui.pushButton_load_folder
        dinSave = self.ui.pushButton_din
        FDSave = self.ui.pushButton_fd_folder
        hf_save = self.ui.pushButton_hf_data

        saveButton.clicked.connect(
            lambda: self.savePath(self.ui.lineEditPath, st.SAVE_KEY))
        loadButton.clicked.connect(
            lambda: self.savePath(self.ui.lineEditLoadPath, st.LOAD_KEY))
        dinSave.clicked.connect(
            lambda: self.saveDIN(self.ui.lineEdit_din, st.DIN_PATH))
        FDSave.clicked.connect(
            lambda: self.savePath(self.ui.lineEditFDPath, st.FD_PATH))
        hf_save.clicked.connect(
            lambda: self.savePath(self.ui.lineEditHFPath, st.HF_PATH))
Example #4
def write_to_file(clipboard_data):
    temp = str(date.today()) + '.txt'
    filename = get_path() + '\\' + temp
    print(filename)
    cur_time = datetime.now()
    timestamp = cur_time.strftime("%H:%M:%S")

    with open(filename, 'a') as the_file:
        the_file.write(timestamp + ": " + clipboard_data + "\n\n")
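
A self-contained variant of the same daily-log idea is sketched below; the datetime imports are spelled out and the project-specific get_path() is replaced by a temporary directory so the snippet runs anywhere.

import os
import tempfile
from datetime import date, datetime

def write_to_file_demo(clipboard_data, log_dir=None):
    # log_dir stands in for the directory that get_path() would return
    log_dir = log_dir or tempfile.gettempdir()
    filename = os.path.join(log_dir, str(date.today()) + '.txt')
    timestamp = datetime.now().strftime("%H:%M:%S")
    with open(filename, 'a') as the_file:
        the_file.write(timestamp + ": " + clipboard_data + "\n\n")
    return filename

print(write_to_file_demo("hello clipboard"))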
Example #5
    def savePath(self, lineEditObj, setStr):
        folder_name = QtWidgets.QFileDialog.getExistingDirectory(
            self, 'Select a Folder')
        if folder_name:
            st.SETTINGS.setValue(setStr, folder_name)
            st.SETTINGS.sync()
            lineEditObj.setPlaceholderText(st.get_path(setStr))
            lineEditObj.setText("")
Example #6
    def saveDIN(self, lineEditObj, setStr):
        filters = "*.din"
        # DIN_PATH is a settings key; resolve it to the stored path (fall back to home)
        if st.get_path(st.DIN_PATH):
            path = st.get_path(st.DIN_PATH)
        else:
            path = os.path.expanduser('~')
        # getOpenFileNames returns (list_of_selected_files, selected_filter)
        file_name = QtWidgets.QFileDialog.getOpenFileNames(
            self, 'Open File', path, filters)
        if file_name[0]:
            st.SETTINGS.setValue(setStr, file_name[0][0])
            st.SETTINGS.sync()
            lineEditObj.setPlaceholderText(st.get_path(setStr))
            lineEditObj.setText("")
Example #7
    def __init__(self):
        tornado_settings = dict(
            debug=False,
            static_path=get_path('static'),
        )
        self.database = records.Database('sqlite:///database.db')
        tornado.web.Application.__init__(self, url_handlers, **tornado_settings)

        sql = "select name from sqlite_master where type='table' order by name"
        rows = self.database.query(sql).as_dict()
        name_set = set([t['name'] for t in rows])
        if 'interceptor' not in name_set:
            self.database.query(SQL_CREATE_TABLE_INTERCEPTOR)

        if 'http_history' not in name_set:
            self.database.query(SQL_CREATE_TABLE_HTTP_HISTORY)
            for t in SQL_CREATE_INDEX_HTTP_HISTORY:
                self.database.query(t)
Example #8
    def get(self):
        path = get_path('templates/index.html')
        with open(path, 'rb') as f:
            self.write(f.read())
        self.set_header('Content-Type', 'text/html')
Example #9
    def save_fast_delivery(self, _data):
        import scipy.io as sio
        # 1. Check if the .din file was added and that it still exist at that path
        #       b) also check that a save folder is set up
        # 2. If it does. load in the primary channel for our station
        # 3. If it does not exist, display a warning message on how to add it and that the FD data won't be saved
        # 4. Perform filtering and save
        din_path = None
        save_path = None
        if st.get_path(st.DIN_PATH):
            din_path = st.get_path(st.DIN_PATH)
        else:
            self.show_custom_message(
                "Warning",
                "The fast delivery data cannot be processed because you haven't selected"
                "the .din file location. Press F1 to access the menu to select it. And "
                "then click the save button again.")
            return

        if st.get_path(st.FD_PATH):
            save_path = st.get_path(st.FD_PATH)
        else:
            self.show_custom_message(
                "Warning",
                "Please select a location where you would like your hourly and daily data"
                "to be saved. Click save again once selected.")
            return

        data_obj = {}

        station_num = _data["PRD"].type[0:-3]
        # get_channel_priority returns multiple sensors in order of importance
        primary_sensor = filt.get_channel_priority(din_path, station_num)[0].upper()
        if primary_sensor not in _data:
            self.show_custom_message(
                "Error", f"Your .din file says that {primary_sensor} "
                f"is the primary sensor but the file you have loaded does "
                f"not contain that sensor. Hourly and daily data will not be saved."
            )
            return
        sl_data = _data[primary_sensor].get_flat_data().copy()
        sl_data = self.remove_9s(sl_data)
        sl_data = sl_data - int(_data[primary_sensor].height)
        data_obj[primary_sensor.lower()] = {
            'time': filt.datenum2(_data[primary_sensor].get_time_vector()),
            'station': station_num,
            'sealevel': sl_data
        }

        year = _data[primary_sensor].date.astype(object).year
        month = _data[primary_sensor].date.astype(object).month

        # Filter to hourly over the loaded month (handle the December -> January rollover)
        if month == 12:
            next_month = filt.datetime(year + 1, 1, 1, 0, 0, 0)
        else:
            next_month = filt.datetime(year, month + 1, 1, 0, 0, 0)
        data_hr = filt.hr_process_2(data_obj,
                                    filt.datetime(year, month, 1, 0, 0, 0),
                                    next_month)

        # for channel parameters see filt.channel_merge function
        # We are not actually merging channels here (not needed for fast delivery)
        # But we still need to run the data through the merge function, even though we are only using one channel
        # in order to get the correct output data format suitable for the daily filter
        ch_params = [{primary_sensor.lower(): 0}]
        hourly_merged = filt.channel_merge(data_hr, ch_params)

        # Note that channel_merge returns a channel attribute, an array of integers representing the channel type
        # used for each day of data. In fast delivery all the numbers should be the same because no merge happens.
        # The int -> channel name mapping is in the var_flag function inside filtering.py.
        data_day = filt.day_119filt(hourly_merged,
                                    _data[primary_sensor].location[0])

        month_str = "{:02}".format(month)
        hourly_filename = save_path + '/' + 'th' + str(station_num) + str(
            year)[-2:] + month_str + '.mat'
        daily_filename = save_path + '/' + 'da' + str(station_num) + str(
            year)[-2:] + month_str + '.mat'
        sio.savemat(hourly_filename, data_hr)
        sio.savemat(daily_filename, data_day)
        self.show_custom_message(
            "Success", "Success \n Hourly and Daily Date Saved to " +
            st.get_path(st.FD_PATH) + "\n")

        monthly_mean = np.round(np.nanmean(data_day['sealevel'])).astype(int)
        # Remove nans, replace with 9999 to match the legacy files
        nan_ind = np.argwhere(np.isnan(data_day['sealevel']))
        data_day['sealevel'][nan_ind] = 9999
        # Round sea level data to the nearest integer and convert to int
        sl_round_up = np.round(data_day['sealevel']).astype(int)

        # right justify with 5 spaces
        sl_str = [str(x).rjust(5, ' ')
                  for x in sl_round_up]  # convert data to string

        daily_filename = save_path + '/' + 'da' + str(station_num) + str(
            year)[-2:] + month_str + '.dat'

        # format the date and name strings to match the legacy .dat format
        month_str = str(month).rjust(2, ' ')
        station_name = _data[primary_sensor].name.ljust(7)
        line_begin_str = f'{station_name}WOC {year}{month_str}'
        counter = 1
        try:
            with open(daily_filename, 'w') as the_file:
                for i, sl in enumerate(sl_str):
                    if i % 11 == 0:
                        line_str = line_begin_str + str(
                            counter) + " " + ''.join(sl_str[i:i + 11])
                        if counter == 3:
                            line_str = line_str.ljust(75)
                            final_str = line_str[:-5] + str(monthly_mean)
                            line_str = final_str
                        the_file.write(line_str + "\n")
                        counter += 1
        except IOError as e:
            self.show_custom_message(
                "Error", "Cannot Save to " + daily_filename + "\n" + str(e) +
                "\n Please select a different path to save to")
Example #10
    def save_to_ts_files(self):
        # Delete the key "ALL" from the dict of sensors
        if "ALL" in self.sens_objects:
            del self.sens_objects["ALL"]
        if (self.sens_objects):
            months = len(
                self.sens_objects["PRD"].line_num)  # amount of months loaded
            # print("Amount of months loaded", months)
            assem_data = [
                [] for j in range(months)
            ]  # initial an empty list of lists with the number of months
            nan_ind = np.argwhere(np.isnan(self.browser.data))
            # print("NAN INDICES",nan_ind)
            # self.browser.data[nan_ind] = 9999
            # self.sens_objects[self.sens_str].data = self.browser.data
            # separate PRD from the rest because it has to be saved on the top file
            # Because dictionaries are unordered
            prd_list = [[] for j in range(months)]

            # Cycle through each month loaded
            for m in range(months):
                # Cycle through each month loaded, where key is the sensor name
                # Use value instead of self.sens_objects[key]?
                for key, value in self.sens_objects.items():
                    # Add header
                    # separate PRD from the rest because it has to be saved on the top file
                    if (key == "PRD"):
                        prd_list[m].append(
                            self.sens_objects[key].header[m].strip("\n"))
                    else:
                        assem_data[m].append(
                            self.sens_objects[key].header[m].strip("\n"))
                    # The ugly range computes the start and end line numbers for each month that was loaded
                    # so that the data can be saved to separate, monthly files
                    for i in range(
                            sum(self.sens_objects[key].line_num[:]) -
                            sum(self.sens_objects[key].line_num[m:]),
                            sum(self.sens_objects[key].line_num[:]) -
                            sum(self.sens_objects[key].line_num[m:]) +
                            self.sens_objects[key].line_num[m]):
                        # File formatting differs based on the sampling rate of a sensor
                        if (int(self.sens_objects[key].rate) >= 5):
                            # Get only sealevel reading, without anything else (no time/date etc)
                            data = ''.join(
                                '{:5.0f}'.format(e)
                                for e in self.sens_objects[key].data.flatten()
                                [i * 12:12 + i * 12].tolist())
                            # The columns/rows containing only time/data and no sealevel measurements
                            it_col_formatted = '  ' + self.sens_objects[key].type + '  ' + \
                                               self.sens_objects[key].time_info[i][8:12].strip()[-2:] + \
                                               self.sens_objects[key].time_info[i][12:20]
                            # assem_data.append(info_time_col[i][0:]+data)
                            if (key == "PRD"):
                                prd_list[m].append(''.join(it_col_formatted) +
                                                   data)
                            else:
                                assem_data[m].append(
                                    ''.join(it_col_formatted) + data)
                        else:
                            data = ''.join(
                                '{:4.0f}'.format(e)
                                for e in self.sens_objects[key].data.flatten()
                                [i * 15:15 + i * 15].tolist())
                            it_col_formatted = '  ' + self.sens_objects[key].type + '  ' + \
                                               self.sens_objects[key].time_info[i][8:12].strip()[-2:] + \
                                               self.sens_objects[key].time_info[i][12:20]
                            # assem_data.append(info_time_col[i][0:]+data)
                            assem_data[m].append(''.join(it_col_formatted) +
                                                 data)
                    if (key == "PRD"):
                        prd_list[m].append('9' * 80)
                    else:
                        assem_data[m].append('9' * 80)
                del data
                # find the start date lines of each monp file that was loaded
                date_str = self.sens_objects[key].time_info[
                    sum(self.sens_objects[key].line_num[:]) -
                    sum(self.sens_objects[key].line_num[m:])]
                month_int = int(date_str[12:14][-2:])
                month_str = "{:02}".format(month_int)
                year_str = date_str[8:12][-2:]
                station_num = self.sens_objects[key].type[0:-3]
                file_name = 't' + station_num + year_str + month_str
                file_extension = '.dat'
                try:
                    with open(
                            st.get_path(st.SAVE_KEY) + '/' + file_name +
                            file_extension, 'w') as the_file:
                        for lin in prd_list[m]:
                            the_file.write(lin + "\n")
                        for line in assem_data[m]:
                            the_file.write(line + "\n")
                        # Each file ends with two lines of 80 9s that's why adding an additional one
                        the_file.write('9' * 80 + "\n")
                    self.show_custom_message(
                        "Success", "Success \n" + file_name + file_extension +
                        " Saved to " + st.get_path(st.SAVE_KEY) + "\n")
                except IOError as e:
                    self.show_custom_message(
                        "Error", "Cannot Save to " + st.get_path(st.SAVE_KEY) +
                        "\n" + str(e) +
                        "\n Please select a different path to save to")
                self.save_fast_delivery(self.sens_objects)
                self.save_mat_high_fq(file_name)
            # if result == True:
            #     print("Succesfully changed to: ", str(self.refLevelEdit.text()))
        else:
            self.show_custom_message("Warning", "You haven't loaded any data.")
Example #11
                '\nPlease enter a location for storing logs (Press enter to use default location): '
            )

            if path == '' or settings.validate_path(path):
                if not path == '':
                    settings.update_path(path)
                break
            print('Invalid path!')

        # create installation dir
        settings.create_file_loc()

        # install dependencies
        print(
            '\nInstalling dependencies...Please make sure your Python version is >= 3.9.0\n'
        )
        helper.sleep_wait(2)
        subprocess.run(["pip", "install", "-r", "requirements.txt"])

        settings.update_first_launch()
    else:
        print('Current location for Clipboard logs is: ' + settings.get_path())

    print('\nAll set! Keep copying :)')

    # start running in background
    DETACHED_PROCESS = 8
    subprocess.Popen("python helper.py",
                     creationflags=DETACHED_PROCESS,
                     close_fds=True)
Example #12
File: utils.py Project: restran/hydrogen
def gen_signed_cert(domain):
    """
    This function takes a domain name as a parameter and then creates a certificate and key with the
    domain name(replacing dots by underscores), finally signing the certificate using specified CA and
    returns the path of key and cert files. If you are yet to generate a CA then check the top comments
    """
    ca_crt = os.path.join(get_path('data/ca'), 'ca.crt')
    ca_key = os.path.join(get_path('data/ca'), 'ca.key')
    key_path = os.path.join(get_path('data/ca'), 'cert.key')
    certs_folder = get_path('data/certs')

    cert_path = os.path.join(
        certs_folder,
        domain.replace('.', '_').replace('*', '_') + '.crt')
    if os.path.exists(key_path) and os.path.exists(cert_path):
        return key_path, cert_path

    # Double-checked generation: skip if the key/cert pair already exists,
    # otherwise take a file lock and re-check before generating
    if os.path.exists(key_path) and os.path.exists(cert_path):
        pass
    else:
        with FileLock(cert_path, timeout=2):
            # Re-check under the lock in case another process created the pair
            if os.path.exists(key_path) and os.path.exists(cert_path):
                pass
            else:
                # The CA stuff is loaded from the same folder as this script
                # ca_cert = load_pem_x509_certificate(open(ca_crt, 'rb').read(), default_backend())
                # The last parameter is the password for your CA key file
                ca_key = load_pem_private_key(
                    open(ca_key, 'rb').read(), None, default_backend())
                key = load_pem_private_key(
                    open(key_path, 'rb').read(), None, default_backend())

                # key.generate_key(crypto.TYPE_RSA, 2048)
                subject = issuer = x509.Name([
                    x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),
                    x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'CA'),
                    x509.NameAttribute(NameOID.LOCALITY_NAME, 'San Francisco'),
                    x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Hydrogen"),
                    x509.NameAttribute(NameOID.COMMON_NAME, domain),
                ])

                cert = (
                    x509.CertificateBuilder()
                    .subject_name(subject)
                    .issuer_name(issuer)
                    .public_key(key.public_key())
                    .serial_number(x509.random_serial_number())
                    .not_valid_before(datetime.utcnow())
                    # Our certificate will be valid for 365 * 2 days
                    .not_valid_after(datetime.utcnow() + timedelta(days=365 * 2))
                    .add_extension(
                        x509.SubjectAlternativeName([x509.DNSName(domain)]),
                        critical=False)
                    # Sign the certificate with the CA private key
                    .sign(ca_key, hashes.SHA256(), default_backend()))
                # Write our certificate out to disk; the key and cert paths are returned below
                with open(cert_path, 'wb') as domain_cert:
                    domain_cert.write(
                        cert.public_bytes(serialization.Encoding.PEM))
                logger.warning(
                    "[*] Generated signed certificate for %s" % domain)
    return key_path, cert_path
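
For readers without the project's CA files on disk, here is a minimal, self-contained sketch of the same cryptography-library builder flow, but self-signed: it generates its own key, 'example.test' is a placeholder domain, and a reasonably recent cryptography release is assumed.

from datetime import datetime, timedelta
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, 'example.test')])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)                 # self-signed: subject == issuer
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.utcnow())
    .not_valid_after(datetime.utcnow() + timedelta(days=365 * 2))
    .add_extension(x509.SubjectAlternativeName([x509.DNSName('example.test')]),
                   critical=False)
    .sign(key, hashes.SHA256())
)
print(cert.public_bytes(serialization.Encoding.PEM).decode().splitlines()[0])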
Example #13
File: views.py Project: kkju/blog
def welcome(request):
    t = get_template('welcome.html')
    print(settings.get_path('/static/'))
    html = t.render(Context({"STATIC_URL": settings.get_path('/static/')}))
    return HttpResponse(html)
Example #14
File: views.py Project: kkju/blog
import datetime
from Blogdetail.models import Blogs
import settings
from django.http import HttpResponse, Http404
from django.template import Context
from django.template.loader import get_template


def welcome(request):
    t = get_template('welcome.html')
    print(settings.get_path('/static/'))
    html = t.render(Context({"STATIC_URL": settings.get_path('/static/')}))
    return HttpResponse(html)

def list_all(request):
    try:
        data = Blogs.objects.all()
    except Exception as e:
        print(e)
        data = {}
    t = get_template('list_all.html')
    html = t.render(Context({'data': data, 'STATIC_URL': settings.get_path('/static/')}))
    return HttpResponse(html)

def details(request, offset):
    try:
        offset = int(offset)
    except ValueError:
        raise Http404()
    dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
    t = get_template('time.html')
    html =  t.render(Context({'date':dt,'hours':offset}))
    return HttpResponse(html)
Example #15
File: build.py Project: CicadaInc/Emoveo

SPEC_PATH_ONE_FILE = "run_desktop_onefile.spec"
SPEC_PATH = "run_desktop.spec"
TEMP_SPEC_PATH = '_spec.spec'

one_file = input('One file? [y/n]\n').lower().startswith('y')
include_data = input('Include data? [y/n]\n').lower().startswith('y')

# Read the chosen spec, optionally inject the data files, fix up the icon entry,
# and write the result to a temporary spec file.
with open(SPEC_PATH_ONE_FILE if one_file else SPEC_PATH, mode='r') as spec_file:
    spec_text = spec_file.read()

if include_data:
    spec_text = spec_text.replace("datas=[]", "datas=" + str(make_datas()))

icon_path = get_path('UI\\icon.ico')
icon_entry = 'icon="%s"' % (icon_path,) if os.path.isfile(icon_path) else ''
spec_text = spec_text.replace('icon=""', icon_entry)

with open(TEMP_SPEC_PATH, mode='w') as temp_spec_file:
    temp_spec_file.write(spec_text)

if os.path.isdir("build"):
    print("Deleting old build")
    os.remove("build")
if os.path.isdir("dist"):
    print("Deleting old dist")
    os.remove("dist")

print('Building...')
os.system("pyinstaller %s" % (TEMP_SPEC_PATH, ))
print('Exiting')

if one_file and not include_data:
    print("Moving .exe to root directory")