Example #1
def quitapplication():
    ok = messagebox.askokcancel("Quit", "Do you want to quit?")
    if ok:

        logger.info(messages.QUIT_START)

        # Clear out the global variables
        global currtable
        global currdb
        global dbconn

        currtable = None
        currdb = None

        # Close the DB connection if open. Note that no commit happens here.
        if dbconn is not None:
            dbconn.close()
            print("Closed the DB connection, closing the App")
            dbconn = None

        # Back up all the databases present in the Storage folder
        utils.backup(PROJECT_ROOT, SERVER, BACKUPS)

        # Shut down the logging instance
        logger.info(messages.LOGGER_SHUTDOWN)
        exitlogman(logger)

        window.destroy()
        window.quit()
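The three-argument `utils.backup(PROJECT_ROOT, SERVER, BACKUPS)` call above is only used, never defined, in this example. A minimal sketch of what such a helper might look like, assuming it snapshots the server's storage folder into a timestamped directory under the backups folder (names and layout here are assumptions, not the project's actual implementation):

import os
import shutil
import time

def backup(project_root, server_dir, backups_dir):
    # Hypothetical sketch: snapshot <project_root>/<server_dir> into a
    # timestamped folder under <project_root>/<backups_dir>.
    src = os.path.join(project_root, server_dir)
    dst = os.path.join(project_root, backups_dir, time.strftime('%Y%m%d_%H%M%S'))
    shutil.copytree(src, dst)
    return dst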
Example #2
def main(args):
    utils.main_load()
    outputfile = args.optf if args.optf else '{0:s}.sespacing.xvg'.format(args.grof)
    utils.backup(outputfile)
    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    # This line will be used when there is a better code design
    # if ARGS.atom_sel is None:
    #     raise ValueError("atom_selection must be specified, check --atom_selection option!")

    # do calculation
    ijdist_dict = sequence_spacing(args.grof, args.xtcf, args.btime, args.etime,
                                   args.peptide_length, args.atom_sel)

    # cannot yield from sequence_spacing function because the result cannot be
    # calculated until all frames have been looped through

    # write headers
    outputf.write('# {0:8s}{1:20s}{2:20s}{3:10s}\n'.format('i-j', 'average', 'std', 'num_of_data_points'))
    # write results to the outputfile
    for k in sorted(ijdist_dict.keys()):
        data = np.array(ijdist_dict[k])
        mean = data.mean()                      # mean of ijdist
        std = data.std()                        # standard deviation of ijdist
        num = len(data)                         # num of data in that ijdist
        outputf.write('{0:8d}{1:20.8f}{2:20.8f}{3:10d}\n'.format(k, mean, std, num))

    # Do some logging at the end
    utils.write_footer(outputf, beginning_time)
    outputf.close()
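The single-argument `utils.backup(outputfile)` pattern recurs throughout the examples below: the helper is called right before the output file is opened for writing, so it presumably moves any existing file out of the way. A minimal sketch under that assumption (a GROMACS-style '#name.N#' rename; the real `utils` implementation is not shown):

import os

def backup(path):
    # Hypothetical sketch: rename an existing file to '#<path>.<n>#' so the
    # new output never overwrites old results.
    if not os.path.exists(path):
        return None
    n = 1
    while os.path.exists('#{0}.{1}#'.format(path, n)):
        n += 1
    renamed = '#{0}.{1}#'.format(path, n)
    os.rename(path, renamed)
    return renamed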
Example #3
def main(args):
    utils.main_load()

    # check the validity of output file name, do backup
    output = args.optf
    if output is None:
        outputfile = '{0:s}.output.xvg'.format(args.grof)
    else:
        outputfile = output

    utils.backup(outputfile)

    # Do some logging at the beginning
    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    # do calculation
    result = calc_dihedral(args.grof, args.xtcf, args.btime, args.etime)

    # write results to the outputfile
    for r in result:
        outputf.write(r)

    # Do some logging at the end
    utils.write_footer(outputf, beginning_time)

    outputf.close()
Example #4
def populate_csv_file():

    backup(csv_path)

    f_csv = CsvFile(csv_path, 'w')

    seen_courses = set()

    course_tags_soup = BeautifulSoup(open(html_path), 'html.parser')

    for course_tag in course_tags_soup.findAll('a'):

        displayed_course_information = course_tag.contents[0]

        department_and_level_regex = r'[A-Zx0-9 \.\-]+'

        if re.match(r'%s \- ' % department_and_level_regex, displayed_course_information):
            department_and_level, title = displayed_course_information.split(' - ', 1)
        elif re.search(r' \- %s($|\s)' % department_and_level_regex, displayed_course_information):
            title, department_and_level = displayed_course_information.rsplit(' - ', 1)
        else:
            title, department_and_level = displayed_course_information, ''

        url = course_tag.get('href')

        if (title, department_and_level) not in seen_courses:
            f_csv.add_row(title, department_and_level, url)

        seen_courses.add((title, department_and_level))

    f_csv.close()
Example #5
def main(args):
    utils.main_load()
    outputfile = args.optf if args.optf else '{0:s}.sespacing.xvg'.format(
        args.grof)
    utils.backup(outputfile)
    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    # This line will be used when there is a better code design
    # if ARGS.atom_sel is None:
    #     raise ValueError("atom_selection must be specified, check --atom_selection option!")

    # do calculation
    ijdist_dict = sequence_spacing(args.grof, args.xtcf, args.btime,
                                   args.etime, args.peptide_length,
                                   args.atom_sel)

    # cannot yield from sequence_spacing function because the result cannot be
    # calculated until all frames have been looped through

    # write headers
    outputf.write('# {0:8s}{1:20s}{2:20s}{3:10s}\n'.format(
        'i-j', 'average', 'std', 'num_of_data_points'))
    # write results to the outputfile
    for k in sorted(ijdist_dict.keys()):
        data = np.array(ijdist_dict[k])
        mean = data.mean()  # mean of ijdist
        std = data.std()  # standard deviation of ijdist
        num = len(data)  # num of data in that ijdist
        outputf.write('{0:8d}{1:20.8f}{2:20.8f}{3:10d}\n'.format(
            k, mean, std, num))

    # Do some logging at the end
    utils.write_footer(outputf, beginning_time)
    outputf.close()
Example #6
    def dbBackup(self, table_list):
        backup(user=MySQL['user'],
               passwd=MySQL['passwd'],
               host=MySQL['host'],
               port=MySQL['port'],
               dbname=MySQL['db'],
               table_list=table_list)

        return self.respData(STATUS.OK, error='Database backup succeeded~')
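Here `backup` takes MySQL connection parameters and a table list, which maps naturally onto a `mysqldump` wrapper. A hedged sketch (the real helper behind `dbBackup` is not shown; the dump file name is an assumption):

import subprocess

def backup(user, passwd, host, port, dbname, table_list):
    # Hypothetical sketch: dump the listed tables with mysqldump. Passing
    # the password on the command line is insecure; a real helper would
    # use a defaults file instead.
    cmd = ['mysqldump',
           '-u', user,
           '-p{0}'.format(passwd),
           '-h', host,
           '-P', str(port),
           dbname] + list(table_list)
    with open('{0}.sql'.format(dbname), 'wb') as dump:
        subprocess.check_call(cmd, stdout=dump)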
Example #7
    def save_pbxproj(self):
        import utils
        utils.backup(file_path=self.__pbx_project_path)
        with open(self.__pbx_project_path, mode='w') as fp:
            fp.write('// !$*UTF8*$!\n')
            fp.write(
                self.dump_pbxproj(note_enabled=True,
                                  json_format_enabled=False))
            fp.write('\n')
Example #8
    def save(self, file_path: str = None):
        import utils
        if self.__buffer and isinstance(self.__buffer, io.BufferedReader):
            if not file_path or os.path.abspath(
                    self.__buffer.name) == os.path.abspath(file_path):
                file_path = self.__buffer.name
                utils.backup(file_path)
        with open(file_path, mode='w') as fp:
            fp.write(self.dump())
Example #9
def main(args):
    main_load()

    output = args.optf
    if output is None:
        outputfile = '{0:s}.output.xtc'.format(args.tprf)
    else:
        outputfile = output
    backup(outputfile)

    # do calculation
    trjcat_plus(args.xtcf, args.tprf, outputfile)
Example #10
def backup_saves(path_ds_save: str, forced=False):
    try:
        for path_file_name in glob(path_ds_save):
            log.debug(f"Check: {path_file_name}")

            # Get timestamps
            save_timestamp = os.path.getmtime(path_file_name)
            now_timestamp = time.time()

            # Back up only if less than 600 seconds have passed since the file was last modified
            is_modified = (now_timestamp - save_timestamp) < 600
            ok = forced or is_modified
            log.debug(
                f"{'Need backup' if ok else 'Not need backup'}. "
                f"Reason: Forced={forced}, Is modified file save={is_modified}"
            )
            if not ok:
                continue

            file_name_backup = backup(path_file_name, now_timestamp)
            log.debug(f"Saving backup: {file_name_backup}")

    except Exception:
        print("ERROR:\n" + traceback.format_exc())
        time.sleep(5 * 60)
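In this example `backup` receives the source path plus a timestamp and returns the name of the copy it made. A minimal sketch under that assumption (the suffix format is a guess; the real helper is not shown), which also covers the one-argument call in a later example:

import shutil
import time

def backup(path_file_name, now_timestamp=None):
    # Hypothetical sketch: copy the save file next to itself with a
    # timestamp suffix and return the backup file name.
    if now_timestamp is None:
        now_timestamp = time.time()
    stamp = time.strftime('%Y%m%d_%H%M%S', time.localtime(now_timestamp))
    file_name_backup = '{0}.{1}.bak'.format(path_file_name, stamp)
    shutil.copy2(path_file_name, file_name_backup)
    return file_name_backup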
Example #11
def main(args):
    utils.main_load()
    outputfile = args.optf if args.optf else '{0:s}.unun.xvg'.format(args.grof)
    utils.backup(outputfile)
    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    result = count_interactions(args.grof, args.xtcf, args.btime, args.etime, args.cutoff)

    # write headers
    outputf.write('# {0:>10s}{1:>8s}\n'.format('time', 'num'))
    # write results to the outputfile
    for r in result:
        outputf.write(r)

    # Do some logging at the end
    utils.write_footer(outputf, beginning_time)
    outputf.close()
Example #12
def main(args):
    U.main_load()
    outputfile = args.optf if args.optf else '{0:s}.rama.xvg'.format(args.grof)
    U.backup(outputfile)
    outputf = open(outputfile, 'w')
    beginning_time = U.write_header(outputf)

    result = calc_rama(args.grof, args.xtcf, args.btime, args.etime)

    # write headers
    outputf.write('# {0:>10s}{1:>8s}{2:>16s}\n'.format('phi', 'psi', 'resname-resid'))
    # write results to the outputfile
    for r in result:
        outputf.write(r)

    # Do some logging at the end
    U.write_footer(outputf, beginning_time)
    outputf.close()
Example #13
def testSpeedWithLibMinSub():
    minSupports = [
        0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.008, 0.005, 0.002, 0.001, 0.0008,
        0.0005
    ]
    label = [1 / x for x in minSupports]
    fileBackup = 'compareMinSup/backup.json'
    transactions, items = loadBaseDataSet()
    target = []
    optimizedLib = []
    for min_support in minSupports:
        print(f'Running with {min_support}-min_support'.center(100, ' '))
        min_confidence = 0.1
        min_lift = 0.0

        start1 = time.time()
        result1 = list(
            targetApriori(transactions=transactions,
                          items=items,
                          min_confidence=min_confidence,
                          min_support=min_support,
                          min_lift=min_lift,
                          numReduce=0.1))
        end1 = time.time()
        target.append(end1 - start1)

        start2 = time.time()
        result2 = list(
            pyLibOptimizedApriori(transactions,
                                  min_confidence=min_confidence,
                                  min_support=min_support,
                                  min_lift=min_lift))
        end2 = time.time()
        optimizedLib.append(end2 - start2)
        print(len(result1))
        print(len(result2))
        print('Backing up...')
        backup(fileBackup, minSupports, target, optimizedLib)
    labels = ['target', 'optimizedLib']
    labelD = {'y': 'Time (s)', 'x': '1/min_support'}
    backup(fileBackup, minSupports, target, optimizedLib)
    drawPlot(label, [target, optimizedLib], labels,
             labelD).savefig('compareMinSup/result.png')
    print('Done!'.center(100, ' '))
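The benchmark scripts in this and the following examples call `backup(fileBackup, ...)` with the JSON path plus the series collected so far, once per iteration and once at the end. A minimal sketch, assuming it simply serializes the series to JSON (the key names are guesses):

import json

def backup(file_backup, x_values, target, optimized_lib):
    # Hypothetical sketch: dump the benchmark series to JSON after every
    # iteration so a long run can be inspected (or resumed) if interrupted.
    with open(file_backup, 'w') as fp:
        json.dump({'x': x_values,
                   'target': target,
                   'optimizedLib': optimized_lib}, fp, indent=2)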
Example #14
def testSpeedWithLib():
    numDatas = [
        1000, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
        100000, 200000, 400000, 600000, 800000, 1000000
    ]
    fileBackup = 'compareLibApyori/backup.json'
    _, items = loadBaseDataSet()
    target = []
    optimizedLib = []
    for numData in numDatas:
        gTransactions = generateData(items, numData)
        transactions = gTransactions
        print(f'Running with {len(transactions)}-dataset'.center(100, ' '))
        min_support = 0.02
        min_confidence = 0.1
        min_lift = 0.0

        start1 = time.time()
        result1 = list(
            targetApriori(transactions=transactions,
                          items=items,
                          min_confidence=min_confidence,
                          min_support=min_support,
                          min_lift=min_lift,
                          numReduce=5))
        end1 = time.time()
        target.append(end1 - start1)

        start2 = time.time()
        result2 = list(
            pyLibOptimizedApriori(transactions,
                                  min_confidence=min_confidence,
                                  min_support=min_support,
                                  min_lift=min_lift))
        end2 = time.time()
        optimizedLib.append(end2 - start2)
        print('Backing up...')
        backup(fileBackup, numDatas, target, optimizedLib)
    labels = ['target', 'optimizedLib']
    labelD = {'y': 'Time (s)', 'x': 'Num Transactions'}
    backup(fileBackup, numDatas, target, optimizedLib)
    drawPlot(numDatas, [target, optimizedLib], labels,
             labelD).savefig('compareLibApyori/result.png')
    print('Done!'.center(100, ' '))
Example #15
def main(args):
    utils.main_load()
    outputfile = args.optf if args.optf else '{0:s}.unun.xvg'.format(args.grof)
    utils.backup(outputfile)
    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    result = count_interactions(args.grof, args.xtcf, args.btime, args.etime,
                                args.cutoff)

    # write headers
    outputf.write('# {0:>10s}{1:>8s}\n'.format('time', 'num'))
    # write results to the outputfile
    for r in result:
        outputf.write(r)

    # Do some logging at the end
    utils.write_footer(outputf, beginning_time)
    outputf.close()
Example #16
def backup_saves(path_ds_save: str):
    try:
        for path_file_name in glob(path_ds_save):
            file_name_backup = backup(path_file_name)
            log.debug(f"Saving backup: {file_name_backup}")

        beep()

    except Exception:
        print("ERROR:\n" + traceback.format_exc())
        time.sleep(5 * 60)
Example #17
def populate_html_file():

    backup(html_path)

    f_html = open(html_path, 'w')

    # Work on a local copy: assigning to the module-level name inside the
    # function would make it local and raise UnboundLocalError at the while.
    catalog_url = harvard_course_catalog_url

    while catalog_url:

        opened_course_catalog = urllib2.urlopen(catalog_url)

        course_catalog_soup = BeautifulSoup(opened_course_catalog.read(), 'html.parser')

        for course_tag in course_catalog_soup.findAll(is_course_title):
            f_html.write('%s\n' % str(course_tag))

        next_link_tag = course_catalog_soup.find(is_next_link)

        catalog_url = next_link_tag.get('href') if next_link_tag else None

    f_html.close()
Example #18
def main(cmd_args):
    args = get_args(cmd_args)

    utils.main_load()

    output = args.optf
    if output is None:
        # it's a log since the results are written to the h5 file directly
        outputfile = '{0:s}.output.log'.format(args.grof)
    else:
        outputfile = output

    utils.backup(outputfile)

    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    A = args
    if not os.path.exists(A.h5):
        raise IOError('{0} does not exist'.format(A.h5))

    # *10: convert to angstrom from nm
    result = count_interactions(A)
    path = os.path.join('/', os.path.dirname(A.xtcf))
    tb_name = os.path.join(path, 'unun_map')

    h5 = tables.openFile(A.h5, mode='a')
    if tb_name in h5:
        logger.info(
            'found {0} already in {1}, replacing with new calculated values'.
            format(tb_name, A.h5))
        h5.getNode(tb_name).remove()
    h5.createArray(where=path, name='unun_map', object=result)
    h5.close()

    utils.write_footer(outputf, beginning_time)
    outputf.close()
Example #19
def testSpeedWithNormal():
    fileBackup = 'compareNormal/backup.json'
    _, items = loadBaseDataSet()
    numDatas = [1000, 1500, 2000, 2500, 3000, 4000, 5000]
    target = []
    purePriori = []
    for numData in numDatas:
        gTransactions = generateData(items, numData)
        transactions = gTransactions
        print(f'Running with {len(transactions)}-dataset'.center(100, ' '))
        min_support = 0.02
        min_confidence = 0.1
        min_lift = 0.0

        start1 = time.time()
        result1 = list(
            targetApriori(transactions=transactions,
                          items=items,
                          min_confidence=min_confidence,
                          min_support=min_support,
                          min_lift=min_lift,
                          numReduce=5))
        end1 = time.time()
        target.append(end1 - start1)

        start2 = time.time()
        result2 = list(pureApriori(transactions, min_support))
        end2 = time.time()
        purePriori.append(end2 - start2)

        print('Backing up...')
        backup(fileBackup, numDatas, target, purePriori)
    labels = ['target', 'pureFunction']
    labelD = {'y': 'Time (s)', 'x': 'Num Transactions'}
    backup(fileBackup, numDatas, target, purePriori)
    drawPlot(numDatas, [target, purePriori], labels,
             labelD).savefig('compareNormal/result.png')
    print('Done!'.center(100, ' '))
Example #20
def main(cmd_args):
    args = get_args(cmd_args)

    utils.main_load()
    
    output = args.optf
    if output is None:
        # it's a log since the results are written to the h5 file directly
        outputfile = '{0:s}.output.log'.format(args.grof)
    else:
        outputfile = output

    utils.backup(outputfile)

    outputf = open(outputfile, 'w')
    beginning_time = utils.write_header(outputf)

    A = args
    if not os.path.exists(A.h5): 
        raise IOError('{0} does not exist'.format(A.h5))

    # *10: convert to angstrom from nm
    result = count_interactions(A)
    path = os.path.join('/', os.path.dirname(A.xtcf))
    tb_name = os.path.join(path, 'unun_map')

    h5 = tables.openFile(A.h5, mode='a')
    if tb_name in h5:
        logger.info('found {0} already in {1}, replacing with new calculated values'.format(tb_name, A.h5))
        h5.getNode(tb_name).remove()
    h5.createArray(where=path, name='unun_map', object=result)
    h5.close()

    utils.write_footer(outputf, beginning_time)
    outputf.close()
Example #21
}""" % (args.source, args.target))

# If yes, find the shortest path and convert
for doable in job:
    if doable:
        print("Conversion available!\n=====================")
        # Find the conversions to update the model in minimum number of version upgrades
        conversions = find_conversions(args.source, args.target,
                                       versions_graph)

        # Loop through all the input models
        for model in args.models:
            print('\n\nUpdating {}...'.format(model))

            # Create a model.bak
            backup(model)
            standardize_namespaces(model)

            # Execute all conversions
            for conversion in conversions:
                model_graph = Graph()
                model_graph.parse(model, format='turtle')
                print("Converting to {}...".format(conversion[1]))
                convert(conversion, model_graph)
                model_graph.serialize(model, format='turtle')
                bump_versions(model, conversion[0], conversion[1])
            print('Output stored: {}'.format(model))
    else:
        print("No conversions available from {} to {}.".format(
            args.source, args.target))
Example #22
    def save(self):
        import utils
        utils.backup(file_path=self.__file_path)
        with open(self.__file_path, mode='w') as fp:
            self.__buffer.seek(0)
            fp.write(self.__buffer.read())
Example #23
import utils
utils.backup(__file__)
Example #24
from __future__ import division
from pylab import *
import random
# pylab's wildcard import already puts numpy's sample in the namespace,
# so import this one under an alias
from random import sample as smpl
import itertools
import utils
utils.backup(__file__)
import synapses


class AbstractSource(object):
    def __init__(self):
        """
        Initialize all relevant variables.
        """
        raise NotImplementedError

    def next(self):
        """
        Returns the next input
        """
        raise NotImplementedError

    def global_range(self):
        """
        Returns the maximal global index of all inputs
        """
        raise NotImplementedError

    def global_index(self):
Example #25
                    metavar='PATH',
                    help='path to source dir (default: "")')
parser.add_argument('-d',
                    '--dst',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to destination dir (default: "")')
parser.add_argument('-t',
                    '--time',
                    default=600,
                    type=int,
                    metavar='N',
                    help='interval seconds to backup')


def ignore(src, dst):
    return [
        'train.json', 'train.json.7z', 'test.json', 'test.json.7z',
        'stinkbug.png', 'sample_submission.csv', 'sample_submission.csv.7z'
    ]


if __name__ == '__main__':
    args = parser.parse_args()
    assert args.src, 'source dir is none'
    assert args.dst, 'destination dir is none'
    assert args.time > 0, 'interval should be positive'

    utils.backup(args.src, args.dst, False, ignore, args.time)
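A hedged sketch of the five-argument helper this script expects: a loop that snapshots the source directory into the destination every `interval` seconds, honoring the `ignore` callback. Treating the third argument as an overwrite flag is a guess; the real `utils.backup` is not shown:

import os
import shutil
import time

def backup(src, dst, overwrite, ignore, interval):
    # Hypothetical sketch: periodically snapshot src into dst, skipping
    # files named by the ignore(dir, names) callback (the same contract
    # as shutil.copytree's ignore parameter).
    while True:
        target = dst if overwrite else os.path.join(
            dst, time.strftime('%Y%m%d_%H%M%S'))
        if overwrite and os.path.exists(target):
            shutil.rmtree(target)
        shutil.copytree(src, target, ignore=ignore)
        time.sleep(interval)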