Example No. 1
def addcontent():
    form = UploadBlog(request.form)
    if request.method == 'POST' and form.validate():
        Process.uploadBlog(form.title.data, form.subtitle.data,
                           form.content.data, form.author.data)
        print(" Successfully Post Blog")
    return render_template('add_content.html')
Example No. 2
def selected():
    session['userid'] = 'Mary'
    form = SpendForm(request.form)
    # Default to the current date so a GET request does not hit an
    # undefined-variable error before the form has been submitted.
    now = datetime.datetime.now()
    monthMM = now.month
    yearYY = now.year
    if request.method == 'POST' and form.validate():
        monthMM = form.monthStr.data
        yearYY = form.yearStr.data
        print(monthMM)

    months = ('Null', 'January', 'February', 'March', 'April', 'May', 'June',
              'July', 'August', 'September', 'October', 'November', 'December')

    checkMonth = int(monthMM)
    checkYear = int(yearYY)
    usersList = Process.processUser(session['userid'],
                                    todayMonth=checkMonth,
                                    todayYear=checkYear)
    limit = Process.limit(session['userid'], checkMonth, checkYear)
    over = Process.over(session['userid'], checkMonth, checkYear)
    interest = Process.interest(session['userid'], checkMonth, checkYear)

    return render_template('nov.html',
                           checkMM=months[checkMonth],
                           checkYY=checkYear,
                           users=usersList,
                           count=len(usersList),
                           limits=limit,
                           over=over,
                           form=form,
                           interest=interest)
Example No. 3
    def Build(self, env):

        input_file = self.GetInputFile(env)
        output_files = self.GetOutputFiles(env)
        print("clscan: " + os.path.basename(input_file))

        # Construct the command-line
        cmdline = [_MakePath("clscan.exe")]
        cmdline += [input_file]
        cmdline += ["-output", output_files[0]]
        cmdline += ["-ast_log", output_files[1]]
        cmdline += ["-spec_log", output_files[2]]
        for path in self.SysIncludePaths:
            cmdline += ["-isystem", path]
        for path in self.IncludePaths:
            cmdline += ["-i", path]
        for define in self.Defines:
            cmdline += ["-D", define]
        Utils.ShowCmdLine(env, cmdline)

        # Launch the scanner and wait for it to finish
        output = Utils.LineScanner(env)
        output.AddLineParser("Includes", "Included:", None,
                             lambda line, length: line[length:].lstrip())
        process = Process.OpenPiped(cmdline)
        Process.WaitForPipeOutput(process, output)

        return process.returncode == 0
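Examples No. 3, 5, 12, 13 and 14 all follow the same pattern: build a cmdline list, attach a Utils.LineScanner via one or more AddLineParser calls, run the tool through Process.OpenPiped / Process.WaitForPipeOutput, and report success through process.returncode. Utils.LineScanner itself never appears in these snippets; the following is a minimal sketch of the contract its call sites imply (the class name and internals are assumptions, not the real implementation):

class LineScannerSketch(object):
    """Assumed shape of Utils.LineScanner, reconstructed from its call sites."""

    def __init__(self, env):
        self._parsers = []

    def AddLineParser(self, name, prefix, exclusions, parse_fn):
        # Each parser collects matching lines into an attribute named `name`
        # (e.g. scanner.Includes); parse_fn(line, len(prefix)) extracts the payload.
        setattr(self, name, [])
        self._parsers.append((name, prefix, exclusions or [], parse_fn))

    def ParseLine(self, line):
        # Presumably invoked once per line of piped process output.
        for name, prefix, exclusions, parse_fn in self._parsers:
            if prefix in line and not any(ex in line for ex in exclusions):
                getattr(self, name).append(parse_fn(line, len(prefix)))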
Example No. 4
def dec():
    session['userid'] = 'Mary'

    now = datetime.datetime.now()
    todayMonth = now.month
    todayYear = now.year
    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December')

    # January's previous month is December of the previous year; otherwise the
    # year is unchanged (the original hardcoded 2018 and always subtracted a year).
    if todayMonth == 1:
        prevMonth = 12
        prevYear = todayYear - 1
    else:
        prevMonth = todayMonth - 1
        prevYear = todayYear

    usersList = Process.processUser(session['userid'], prevMonth, prevYear)
    totalLeft = Process.savingsLeft(session['userid'], prevMonth, prevYear)
    limit = Process.limit(session['userid'], prevMonth, prevYear)
    over = Process.over(session['userid'], prevMonth, prevYear)

    return render_template('dec.html',
                           users=usersList,
                           todayMonth=todayMonth,
                           prevMonth=prevMonth,
                           todayYear=todayYear,
                           prevYear=prevYear,
                           left=totalLeft,
                           limits=limit,
                           over=over)
Example No. 5
    def Build(self, env):

        # Ensure the command-line for the current configuration is up-to-date
        options = self.OptionsMap[env.CurrentConfig.CmdLineArg]
        options.UpdateCommandLine()

        # Augment command-line with current environment
        cmdline = [os.path.join(_InstallPath, "wave.exe")]
        cmdline += self.OptionsMap[env.CurrentConfig.CmdLineArg].CommandLine
        cmdline += ['--output=' + self.GetOutputFiles(env)[0]]
        cmdline += ['--listincludes=-']
        cmdline += [self.GetInputFile(env)]
        Utils.ShowCmdLine(env, cmdline)

        # Launch Wave with a dependency scanner and wait for it to finish
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser(
            "Includes", '"', ["<"],
            lambda line, length: line.split("(")[1].rstrip()[:-1])
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.WaitForPipeOutput(process, scanner)

        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, scanner.Includes)

        return process.returncode == 0
Example No. 6
    def RR(self):
        while self.NumOfProcess:
            self.RefreshProcesses()
            while len(self.ActiveProcesses):
                remaining = self.ActiveProcesses[0].GetRemainingTime()
                if remaining >= self.quantum:
                    pid = self.ActiveProcesses[0].GetID()
                    self.ActiveProcesses[0].SetRemainingTime(
                        self.ActiveProcesses[0].GetRemainingTime() -
                        self.quantum)
                    self.Busy(self.quantum)
                    self.ActiveProcesses.append(self.ActiveProcesses[0])
                    self.ActiveProcesses.pop(0)
                    self.PrintProcesses.append(
                        Process(pid, self.Time - self.quantum, self.quantum, 1))
                    if pid != self.ActiveProcesses[0].GetID():
                        self.Busy(self.context)
                else:
                    self.ActiveProcesses[0].SetFinishTime(remaining +
                                                          self.Time)
                    self.Busy(remaining)
                    self.PrintInfo.append(self.ActiveProcesses[0])

                    self.PrintProcesses.append(
                        Process(self.ActiveProcesses[0].GetID(),
                                self.Time - remaining, remaining, self.Time))
                    self.NumOfProcess -= 1
                    self.ActiveProcesses.pop(0)
                    self.Busy(self.context)
            self.Time += 1
        self.printData()
        self.Draw()
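Both schedulers in this file (this RR and the SRTN in Example No. 11) drive their process objects through GetID/GetRemainingTime/SetRemainingTime/SetFinishTime and build Process(id, start, duration, finish) records for printing. The real class is not shown; a minimal stand-in consistent with those call sites (names and fields are assumptions):

class Process(object):
    """Assumed record type behind the scheduler examples above."""

    def __init__(self, pid, start, duration, finish=0):
        self._pid = pid
        self._start = start          # time the slice/burst began
        self._remaining = duration   # remaining CPU time
        self._finish = finish

    def GetID(self):
        return self._pid

    def GetRemainingTime(self):
        return self._remaining

    def SetRemainingTime(self, t):
        self._remaining = t

    def SetFinishTime(self, t):
        self._finish = t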
Example No. 7
def varObject(sentence):
    wordlist = p.getWord(sentence)
    poslist = p.getTag(sentence)
    objone = ""
    objtwo = ""
    if len(wordlist) > 3 and wordlist[0] in [
            "turn", "set", "point", "save", "make", "change", "appoint",
            "schedule", "direct", "realize", "write", "allocate", "deposit",
            "put", "place", "store"
    ]:
        objone = wordlist[1]
        # Compare against each candidate explicitly: `x == "a" or "b"` is
        # always truthy, since a non-empty string literal is truthy.
        if poslist[2] in ("article", "preposition"):
            objtwo = wordlist[3]
        elif poslist[2] in ("noun", "numeral") or wordlist[2] == "to":
            objtwo = wordlist[3]
    elif wordlist[1] in ("=", "->", "is"):
        objone = wordlist[0]
        objtwo = wordlist[2]
    else:
        pass

    return objone, objtwo
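The conditions above originally read `poslist[2] == "article" or "preposition"`, which is always truthy: Python parses it as `(poslist[2] == "article") or "preposition"`, and a non-empty string literal is truthy. A two-line demonstration:

tag = "noun"
print(tag == "article" or "preposition")   # prints 'preposition' (truthy), not False
print(tag in ("article", "preposition"))   # prints False, as intended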
Example No. 8
    def Build(self, env):

        output_files = self.GetOutputFiles(env)

        # Node entry point takes precedence over config specified entry-point
        entry_point = self.EntryPoint
        if entry_point is None:
            entry_point = env.CurrentConfig.FXCompileOptions.EntryPoint

        # Build command line
        cmdline = [os.path.join(x86BinDir, "fxc.exe")]
        cmdline += [self.Path, '/T' + self.Profile]
        cmdline += env.CurrentConfig.FXCompileOptions.CommandLine
        cmdline += self.DefineCmdLine
        cmdline += self.BuildCommandLine
        if entry_point:
            cmdline += ['/E' + entry_point]
        Utils.ShowCmdLine(env, cmdline)

        # Create the include scanner and launch the compiler
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser("Includes", "Resolved to [",
                              ["Opening file [", "Current working dir ["],
                              lambda line, length: line[length:-1].lstrip())
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.WaitForPipeOutput(process, scanner)

        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, scanner.Includes)

        return process.returncode == 0
Example No. 9
def GetVisualCEnv():

    if VSInstallDir is None:
        print("ERROR: Visual Studio install directory not detected")
        return None

    # Locate the batch file that sets up the Visual C build environment
    if not os.path.exists(VCVarsPath):
        print("ERROR: Visual C environment setup batch file not found")
        return None

    # Run the batch file, output the environment and prepare it for parsing
    process = Process.OpenPiped(VCVarsPath +
                                " x86 & echo ===ENVBEGIN=== & set")
    output = Process.WaitForPipeOutput(process)
    output = output.split("===ENVBEGIN=== \r\n")[1]
    output = output.splitlines()

    # Start with the current environment, override with any parsed environment values
    env = os.environ.copy()
    for line in output:
        try:
            # Split on the first '=' only; values may themselves contain '='.
            var, value = line.split("=", 1)
            env[var.upper()] = value
        except ValueError:
            print("WARNING: environment variable skipped -> " + line)

    # This environment variable is defined in the VS2005 IDE and prevents cl.exe output
    # being correctly captured, so remove it!
    if "VS_UNICODE_OUTPUT" in env:
        del env["VS_UNICODE_OUTPUT"]

    return env
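A hedged usage sketch for GetVisualCEnv, consistent with how the Build() examples pass an environment to Process.OpenPiped; the cl.exe command line is illustrative only:

env = GetVisualCEnv()
if env is not None:
    # Run a Visual C tool with the prepared environment
    process = Process.OpenPiped("cl.exe /nologo /c main.cpp", env)
    Process.WaitForPipeOutput(process)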
Example No. 10

def create_round_robin_test(machine, quantum):
    """
    Creates a two-process round-robin test (adapted from Professor Amr
    Elkady's lecture example; only processes A and B are used here)
    :return: returns a machine with the test processes
    """

    #
    # Professor Amr Elkady's class lecture example
    #

    # process A
    processA = Process.Process("A", 0, quantum)

    processA.add_cpu_burst(4)
    processA.add_io_burst(4)
    processA.add_cpu_burst(4)
    processA.add_io_burst(4)
    processA.add_cpu_burst(4)

    machine.add(processA)

    # process B
    processB = Process.Process("B", 2, quantum)

    processB.add_cpu_burst(8)
    processB.add_io_burst(1)
    processB.add_cpu_burst(8)

    machine.add(processB)

    return machine
Example No. 11
    def SRTN(self):

        while self.NumOfProcess:
            self.RefreshProcesses()
            self.SortRemainingTime()
            while len(self.ActiveProcesses):
                pid = self.ActiveProcesses[0].GetID()
                p = self.ActiveProcesses[0]
                end = True
                counter = 0
                while self.ActiveProcesses[0].GetRemainingTime():
                    self.Time += 1
                    counter += 1
                    p.SetRemainingTime(p.GetRemainingTime() - 1)
                    self.RefreshProcesses()
                    self.SortRemainingTime()
                    if pid != self.ActiveProcesses[0].GetID():
                        self.PrintProcesses.append(
                            Process(p.GetID(), self.Time - counter, counter,
                                    self.Time))
                        self.Busy(self.context)
                        end = False
                        break
                if end:
                    self.ActiveProcesses[0].SetFinishTime(self.Time)
                    self.PrintInfo.append(self.ActiveProcesses[0])
                    self.PrintProcesses.append(
                        Process(self.ActiveProcesses[0].GetID(),
                                self.Time - counter, counter, self.Time))
                    self.NumOfProcess -= 1
                    self.ActiveProcesses.pop(0)
                    self.Busy(self.context)
            self.Time += 1
        self.printData()
        self.Draw()
Example No. 12
    def Build(self, env):

        # Ensure the command-line for the current configuration is up-to-date
        options = self.OptionsMap[env.CurrentConfig.CmdLineArg]
        options.UpdateCommandLine()

        output_files = self.GetOutputFiles(env)

        # Build command-line from current configuration
        cmdline = [os.path.join(_InstallPath, "cbpp.exe")]
        cmdline += [self.GetInputFile(env)]
        cmdline += options.CommandLine
        cmdline += ["-noheader"]
        cmdline += ["-output", output_files[0]]
        cmdline += ["-show_includes"]
        if len(output_files) > 1:
            cmdline += ["-output_bin", output_files[1]]
        cmdline += ["-target", self.Target]
        Utils.ShowCmdLine(env, cmdline)

        # Launch cbpp with a dependency scanner and wait for it to finish
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser("Includes", 'cpp: included "', None,
                              lambda line, length: line[length:-1])
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.WaitForPipeOutput(process, scanner)

        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, scanner.Includes)

        return process.returncode == 0
Example No. 13
    def Build(self, env):

        # Node entry point takes precedence over config specified entry-point
        entry_point = self.EntryPoint
        if entry_point is None:
            entry_point = env.CurrentConfig.ShaderCompileOptions.EntryPoint

        # Build command line
        cmdline = [os.path.join(ShaderCompilerPath, "ShaderCompiler.exe")]
        cmdline += ['/T' + self.Profile]
        cmdline += env.CurrentConfig.ShaderCompileOptions.CommandLine
        cmdline += self.DefineCmdLine
        cmdline += self.BuildCommandLine
        if entry_point:
            cmdline += ['/E' + entry_point]
        cmdline += ["/ShowCppOutputs"]
        if ShowTrace:
            cmdline += ["/trace"]
        cmdline += [self.Path]
        Utils.ShowCmdLine(env, cmdline)

        # Create the include scanner and launch the compiler
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser("Includes", "cpp: included", None, lambda line, length: line.lstrip()[15:-1])
        scanner.AddLineParser("Outputs", "cpp: output", None, lambda line, length: line.lstrip()[12:])
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.WaitForPipeOutput(process, scanner)

        # Record the implicit dependencies/outputs for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, scanner.Includes)
        data.SetImplicitOutputs(env, scanner.Outputs)

        return process.returncode == 0
Example No. 14
    def Build(self, env):

        output_files = self.GetOutputFiles(env)
        Utils.Print(env, "Linking: " + output_files[0] + "\n")

        # Construct the command-line
        cmdline = ["link.exe"] + env.CurrentConfig.LinkOptions.CommandLine
        cmdline += ['/OUT:' + output_files[0]]
        if env.CurrentConfig.LinkOptions.MapFile:
            cmdline += ["/MAP:" + output_files[1]]
        cmdline += [dep.GetOutputFiles(env)[0] for dep in self.Dependencies]
        cmdline += [dep.GetOutputFiles(env)[0] for dep in self.LibFiles]
        cmdline += [dep.GetOutputFiles(env)[0] for dep in self.WeakLibFiles]
        Utils.ShowCmdLine(env, cmdline)

        #
        # When library files get added as dependencies to this link node they get added without a path.
        # This requires the linker to check its list of search paths for the location of any input
        # library files.
        #
        # The build system however, needs full paths to evaluate dependencies on each build. Rather than
        # trying to search the library paths in the build system (and potentially getting them wrong/different
        # to the linker), the linker is asked to output the full path of all libraries it links with. These
        # then get added as implicit dependencies.
        #
        # Create the lib scanner and run the link process
        #
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser(
            "Includes", "Searching ",
            ["Searching libraries", "Finished searching libraries"],
            lambda line, length: line[length:-1])
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.PollPipeOutput(process, scanner)

        #
        # Weak library files are those that should be provided as input to the link step but not used
        # as dependencies to check if the link step needs to be rebuilt. Search for those in the scanner
        # output and exclude them from the implicit dependency list.
        #
        includes = []
        for include in scanner.Includes:

            ignore_dep = False
            for lib in self.WeakLibFiles:
                lib_name = lib.GetInputFile(env)
                if lib_name in include:
                    ignore_dep = True
                    break

            if not ignore_dep:
                includes.append(include)

        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, includes)

        return process.returncode == 0
Example No. 15

def render_s_img(img, markers, joints, markers_3d, joints_3d, marker_occl, joint_occl, camobj, supp_pts, size=691):
    padding = 0.04 * size
    r_img, new_cam = proc.resize_pad_crop_img_with_camparams(img, supp_pts, camobj.dump_params(), size, padding)
    camproj = proc.param2camproj(new_cam)
    new_joints = camproj.project_linear(joints_3d.T.astype(np.float32))
    new_markers = camproj.project_linear(markers_3d.T.astype(np.float32))

    plt_img = plot_marker(r_img, new_markers, marker_occl, size=12)
    return plot_linkage(plt_img, new_joints, joint_occl, False)
Example No. 16
def Extract7ZipFileTo(filename, path):
    if SevenZipExe is None:
        print("ERROR: 7-Zip has not been installed. Call Install7Zip first.")
        return

    # Use previously-installed 7zip
    command_line = f"{SevenZipExe} x -o{path} {filename}"
    process = Process.OpenPiped(command_line)
    Process.PollPipeOutput(process, lambda t: print(t.strip("\r\n")))
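Note that the unquoted command line breaks as soon as SevenZipExe, path, or filename contains a space; a quoted variant, under the same assumed string-based Process.OpenPiped API:

    # Quote each component so paths containing spaces survive shell splitting
    command_line = f'"{SevenZipExe}" x -o"{path}" "{filename}"'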
Example No. 17
def main():
    process1 = Process.Process(PREFIX_COMP_ARR)
    idx_guide_dict = process1.get_index_guide()
    process2 = Process.Process(PREFIX_TARGET_ARR)
    target_dict = process2.get_target_seq()
    # print(len(target_dict))
    # print(target_dict)
    result = process2.get_data(CNT, idx_guide_dict, target_dict)
    util = Utils.Utils([REULT_PATH, result])
    util.make_excel()
Example No. 18
def test_correlation(data):
    """
    Test method for correlation method in
    process file with given
    dataframe, does not crash if it passed
    """
    assert_equals(0.62520091,
                  Process.correlation(data, 'Hip circumference (cm)'))
    assert_equals(0.29145844, Process.correlation(data, 'Age (years)'))
    assert_equals(-0.0894953, Process.correlation(data, 'Height (inches)'))
Example No. 19
def ifObject(sentence):
    wordlist = p.getWord(sentence)
    poslist = p.getTag(sentence)
    objtwo = ""
    objone = wordlist[1]
    operator = ""
    lessthan = ["light", "less", "tin", "small", "lower", "young"]
    greaterthan = ["great", "old", "tall", "big", "high", "heav"]
    wrong = ["unequal", "un", "not", "!"]
    lessflag = False
    moreflag = False
    wrongFlag = False
    equalFlag = False
    for x in range(2, len(wordlist)):
        for y in lessthan:
            if y in wordlist[x]:
                if "equal" in wordlist[x]:
                    equalFlag = True
                    lessflag = True
                else:
                    lessflag = True
        for y in greaterthan:
            if y in wordlist[x]:
                if "equal" in wordlist[x]:
                    equalFlag = True
                    moreflag = True
                else:
                    moreflag = True
        for y in wrong:
            if y in wordlist[x]:
                wrongFlag = True
        if "equal" in wordlist[x]:
            equalFlag = True

        if lessflag:
            if equalFlag:
                operator = "<="
            else:
                operator = "<"
        elif moreflag:
            if equalFlag:
                operator = ">="
            else:
                operator = ">"
        elif wrongFlag:
            operator = "!="

        else:
            operator = "=="

        if x == len(wordlist) - 1:
            objtwo = wordlist[x]

    return objone, operator, objtwo
Example No. 20
    def start(self):
        self.logFilePathMessage.set("")
        if self.logFilePath.get() and self.mes.get():

            path = SetupPath.pathFiles(self.logFilePath.get())
            init_dataframes = InitDataFrames.InitDataFrames(path)
            Process.mainProcess(init_dataframes, self.mes.get())

        else:
            self.logFilePathMessage.set(
                "Seleccione la Carpeta Resources y ponga el mes")  # "Select the Resources folder and enter the month"
Example No. 21
def train():
    with tf.Graph().as_default():

        images, labels = Process.inputs()

        tf.summary.image("Base_Image", images, max_outputs=2, collections=None)

        forward_propagation_results = Process.forward_propagation(images, 0.5)

        softmax_debug = tf.nn.softmax(forward_propagation_results)

        cost = Process.error(forward_propagation_results, labels)

        train_op = Process.train(cost)

        saver = tf.train.Saver()

        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

        sess.run(init)

        tf.train.start_queue_runners(sess=sess)

        train_dir = "nn-data"

        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)

        for step in xrange(520000):
            start_time = time.time()
            loss_value, _, debug, labelsa = sess.run(
                [cost, train_op, softmax_debug, labels])
            print(debug)
            duration = time.time() - start_time
            #accuracy = sess.run(train_accuracy)

            if step % 1 == 0:
                examples_per_sec = 1 / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, (%.3f examples/sec; %.3f '
                              'sec/batch) loss = %.3f')
                print(format_str % (datetime.now(), step, examples_per_sec,
                                    sec_per_batch, loss_value))
                print(labelsa)
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
                checkpoint_path = os.path.join(train_dir, 'model')
                saver.save(sess, checkpoint_path)
Example No. 22

def create_lecture_example(machine, quantum):
    """
    Creates the example of four processes from Professor Amr Elkady's lecture
    :return: returns a machine with the lecture example processes
    """

    #
    # Professor Amr Elkady's class lecture example
    #

    # The lecture example pins the quantum at 3, overriding the argument.
    quantum = 3

    # process A
    processA = Process.Process("A", 0, quantum, priority=5)

    processA.add_cpu_burst(4)
    processA.add_io_burst(4)
    processA.add_cpu_burst(4)
    processA.add_io_burst(4)
    processA.add_cpu_burst(4)

    machine.add(processA)

    # process B
    processB = Process.Process("B", 2, quantum, priority=10)

    processB.add_cpu_burst(8)
    processB.add_io_burst(1)
    processB.add_cpu_burst(8)

    machine.add(processB)

    # process C
    processC = Process.Process("C", 3, quantum, priority=1)

    processC.add_cpu_burst(2)
    processC.add_io_burst(1)
    processC.add_cpu_burst(2)

    machine.add(processC)

    # process D
    processD = Process.Process("D", 7, quantum, priority=0)

    processD.add_cpu_burst(1)
    processD.add_io_burst(1)
    processD.add_cpu_burst(1)
    processD.add_io_burst(1)
    processD.add_cpu_burst(1)

    machine.add(processD)

    return machine
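Only machine.add(...) appears in the source, so how these builders are driven is a guess; a sketch assuming a hypothetical Machine host with a run() method:

machine = Machine()                                   # hypothetical scheduler host
machine = create_lecture_example(machine, quantum=3)
machine.run()                                         # hypothetical: run the simulation and report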
Example No. 23
    def runSlot(self):
        # this is called when the run button is clicked
        #self.debugPrint("The run button was clicked")

        # get the updated information, and then run the scan
        self.updateInfoForRun()
        
        # call the scan
        # FIXME: This needs to be fixed if we want the save to be optional

        try:
            # display dialog that the scan is running
            m1 = QtWidgets.QMessageBox()
            m1.setText("Scan is about to start press OK to continue.\n"
                       "Once scan starts please wait this may take a few minutes.\n")
            m1.setIcon(QtWidgets.QMessageBox.Warning)
            m1.setStandardButtons(QtWidgets.QMessageBox.Ok
                                  | QtWidgets.QMessageBox.Cancel)
            m1.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            ret = m1.exec_()
            # Honor the Cancel button the dialog offers.
            if ret == QtWidgets.QMessageBox.Cancel:
                return
            Process.main(self.videoFileLocationText, self.saveLocationText)
            m1.setText("Scan Complete!")
            ret = m1.exec_()

            # read the data and print to the dialog box
            with open(self.saveLocationText + '/review_settings.csv') as f1:
                s1 = f1.read() + '\n'
            with open(self.saveLocationText + '/patient_limits.csv') as f2:
                s2 = f2.read() + '\n'
            self.reportText.setPlainText(s1 + '\n' + s2)

        except Exception as ex:
            m1.accept()
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            m2 = QtWidgets.QMessageBox()
            m2.setText("Invalid file name!\nCheck the video file and save location.\n"
                       + message)
            m2.setIcon(QtWidgets.QMessageBox.Warning)
            m2.setStandardButtons(QtWidgets.QMessageBox.Ok
                                 | QtWidgets.QMessageBox.Cancel)
            m2.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            ret = m2.exec_()

        # if the import option is selected lets run that
        # the files come from 'Process'
        if self.importToNeo4jBool:
            WorkingWithNeo4j.main(self.patientInfoText, self.saveLocationText + '/review_settings.csv',
                                  self.saveLocationText + '/patient_limits.csv')

        if not self.saveCheckBoxBool:
            os.remove(self.saveLocationText + '/patient_limits.csv')
            os.remove(self.saveLocationText + '/review_settings.csv')
Example No. 24

def getOCR():

    # configparser values are strings, so compare with '1'
    # (or use config['DEFAULT'].getboolean('onCamera')).
    if config['DEFAULT']['onCamera'] == '1':
        Process.takePicture()
        img = cv2.imread('ocrFile.jpg')
    else:
        img = cv2.imread(config['DEFAULT']['imageFile'])
    cv2.imshow("image", img)
    OCR = Process.Process(img).processing()
    print(OCR)

    return OCR
Example No. 25
def baseline(corpus_file_name):
    dic_es = hunspellES.HunSpell("es.dic",
                                 "es.aff")  # Get Hunspell Spanish dictionary
    dic_eu = hunspellEU.HunSpell('eu.dic',
                                 'eu.aff')  # Get Hunspell Basque dictionary

    CS_Corpus = open(corpus_file_name, 'rb')
    CS_Reader = csv.reader(CS_Corpus, delimiter=',', quotechar='"')
    CS_Reader.next()  #Skip first line

    count = 0
    right = 0
    total = 0

    y_true = []
    y_pred = []

    for row in CS_Reader:
        row_processed = getTweetTokensTags(row)
        hand_tagged_tags = []
        for token, tag in row_processed:
            hand_tagged_tags.append(tag)

        tokens = Process.tokenize(
            row[2].decode('UTF-8'))  # Tweet text tokenized
        predicted_tags = []
        for i in range(0, len(tokens)):
            t0 = tokens[i]
            if i > 0 and tokens[i - 1] not in [".", "!", "?"] and t0.istitle():
                predicted_tags.append("IE")
            elif t0.isupper():
                predicted_tags.append("IE")
            elif dic_es.spell(t0):
                predicted_tags.append("ES")
            elif dic_eu.spell(t0):
                predicted_tags.append("EUS")
            elif Process.isURL(t0):
                predicted_tags.append("URL")
            elif Process.isID(t0):
                predicted_tags.append("ID")
            else:
                predicted_tags.append("EG")

        y_true.append(hand_tagged_tags)
        y_pred.append(predicted_tags)

    print ""
    print "Sequence item accuracy score: %.5f" % seqItemAccuracyScore(
        y_true, y_pred)
    print "Sequence accuracy score: %.5f" % seqAccuracyScore(y_true, y_pred)
    print "Global tag accuracy score: %.5f" % globalTagAccuracyScore(
        y_true, y_pred)
Example No. 26

def main(features, algorithm, seed, maxiter, stepsize, numtrees, maxdepth,
         regparam, elasticnetparam, threshold):
    sc = SparkContext.getOrCreate()
    sc.setLogLevel('ERROR')
    feature_list = features.split()
    if algorithm == 'RandomForest':
        # python Driver.py 'BFSIZE HDRSIZE NODETYPE NODESTATE METADATASIZE' RandomForest
        # numTrees = 20
        df, _ = Process.extract_features(feature_list,
                                         binary=False,
                                         multiclass=True,
                                         overwrite=False)
        Learn.randomForest(df,
                           feature_list,
                           maxDepth=maxdepth,
                           numTrees=numtrees,
                           seed=seed)
    elif algorithm == 'GradientBoosting':
        _, df = Process.extract_features(feature_list,
                                         binary=True,
                                         multiclass=False,
                                         overwrite=False)
        Learn.gradientBoosting(df,
                               feature_list,
                               maxIter=maxiter,
                               stepSize=stepsize)
    elif algorithm == 'MultinomialRegression':
        # python Driver.py 'BFSIZE HDRSIZE NODETYPE NODESTATE METADATASIZE' MultinomialRegression --maxIter 10 --regParam 0.3 --elasticNetParam 0.8 --threshold 0.5
        # without metadatasize
        df, _ = Process.extract_features(feature_list,
                                         binary=False,
                                         multiclass=True,
                                         overwrite=False)
        Learn.multinomialRegression(df,
                                    feature_list,
                                    maxIter=maxiter,
                                    regParam=regparam,
                                    elasticNetParam=elasticnetparam,
                                    threshold=threshold)
    elif algorithm == 'LinearSVC':
        # python Driver.py 'BFSIZE HDRSIZE NODETYPE NODESTATE METADATASIZE' LinearSVC --maxIter 10 --regParam 0.3 --threshold 0.5
        _, df = Process.extract_features(feature_list,
                                         binary=True,
                                         multiclass=False,
                                         overwrite=False)
        Learn.linearSVC(df,
                        feature_list,
                        maxIter=maxiter,
                        regParam=regparam,
                        threshold=threshold)

    sc.stop()
Example No. 27
    def compute(self):
        basicColorChosen = ((self.basicColor1CheckBox.isChecked()) +
                            (self.basicColor2CheckBox.isChecked() << 1) +
                            (self.basicColor3CheckBox.isChecked() << 2) +
                            (self.basicColor4CheckBox.isChecked() << 3) +
                            (self.basicColor5CheckBox.isChecked() << 4) +
                            (self.basicColor6CheckBox.isChecked() << 5))
        self.setSampleData()
        process = Process()
        Max = 5000
        resp = process.compute(basicColorChosen, self.sampleData,
                               [[0, Max]] * 6, 1, Max)
        # tmp  = process.compute(basicColorChosen, self.sampleData, [[tmp[0][i] - 15, tmp[0][i] + 15] for i in range(len(tmp[0]))], 5)
        # resp = process.compute(basicColorChosen, self.sampleData, [[tmp[0][i] - 8, tmp[0][i] + 8] for i in range(len(tmp[0]))], 1)
        sampleRes = resp[1]
        ratio = resp[0]
        pos = 0
        self.ratio = [0, 0, 0, 0, 0, 0]
        if self.basicColor1CheckBox.isChecked():
            self.ratio[0] = ratio[pos]
            pos += 1
        if self.basicColor2CheckBox.isChecked():
            self.ratio[1] = ratio[pos]
            pos += 1
        if self.basicColor3CheckBox.isChecked():
            self.ratio[2] = ratio[pos]
            pos += 1
        if self.basicColor4CheckBox.isChecked():
            self.ratio[3] = ratio[pos]
            pos += 1
        if self.basicColor5CheckBox.isChecked():
            self.ratio[4] = ratio[pos]
            pos += 1
        if self.basicColor6CheckBox.isChecked():
            self.ratio[5] = ratio[pos]
            pos += 1
        self.printRatio()
        self.graphView.clear()
        self.graphView.plot(list(range(400, 701, 10)), self.sampleData, 'b')
        self.graphView.plot(list(range(400, 701, 10)), sampleRes, 'r')

        self.setColorBox(0, Helper.sampleToRGB(self.sampleData))
        self.setColorBox(1, Helper.sampleToRGB(sampleRes))

        LAB1 = Helper.sampleToCIELAB(self.sampleData)
        LAB2 = Helper.sampleToCIELAB(sampleRes)
        self.sampleColorCIELAB.setText('CIELAB: (%.2f, %.2f, %.2f)' %
                                       (LAB1[0], LAB1[1], LAB1[2]))
        self.computeColorCIELAB.setText('CIELAB: (%.2f, %.2f, %.2f)' %
                                        (LAB2[0], LAB2[1], LAB2[2]))

        QMessageBox.about(self, 'Thông báo', 'Đã tính toán xong!')  # Vietnamese: "Notice", "Computation finished!"
Example No. 28

def main():
    p1 = Process.Process(1)
    p2 = Process.Process(2)
    pm = Process.process_manager()
    pm.add_Process(p1)
    pm.add_Process(p2)
    Mailer = Message_Passing.Message_Passing(True, True, 2, pm)

    t1 = threading.Thread(target=process1, args=(p1, Mailer))
    t2 = threading.Thread(target=process2, args=(p2, Mailer))

    t1.start()
    t2.start()
Example No. 29
 def burnProject(self, answer=True):
     if not answer:
         return
     # Only the job type differs between the author modes.
     if self.project.settings.authormode.getValue() == "data_ts":
         job = Process.DVDdataJob(self.project)
     else:
         job = Process.DVDJob(self.project)
     job_manager.AddJob(job)
     job_manager.in_background = False
     self.session.openWithCallback(self.JobViewCB, JobView, job)
Example No. 30
def train():
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        images, labels = Process.inputs()
        forward_propgation_results = Process.forward_propagation(images)

        cost = Process.error(forward_propgation_results, labels)

        train_op = Process.train(cost, global_step)

        saver = tf.train.Saver()

        summary_op = tf.merge_all_summaries()

        init = tf.initialize_all_variables()

        sess = tf.InteractiveSession()

        sess.run(init)

        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess = sess)

        train_dir = "/home/zan/nn-data"

        summary_writer = tf.train.SummaryWriter(train_dir, sess.graph)

        for step in xrange(650):
            start_time = time.time()
            _, loss_value = sess.run([train_op, cost])
            duration = time.time() - start_time

            assert not np.isnan(loss_value)

            if step % 1 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = ('%s: step %d, (%.1f examples/sec; %.3f ''sec/batch)')
                print (format_str % (datetime.now(), step, examples_per_sec, sec_per_batch))

                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)


            # Checkpoint every 20 steps and on the final step (the original
            # `step % 20 or (step + 1) == 20` fired on almost every step).
            if step % 20 == 0 or (step + 1) == 650:
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
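This snippet is written against pre-1.0 TensorFlow; Example No. 21 above is essentially the same training loop using the renamed post-1.0 API. The correspondence, for reference:

# Pre-1.0 TensorFlow (this example)   ->  Post-1.0 (as in Example No. 21)
# tf.merge_all_summaries()            ->  tf.summary.merge_all()
# tf.initialize_all_variables()       ->  tf.global_variables_initializer()
# tf.train.SummaryWriter(dir, graph)  ->  tf.summary.FileWriter(dir, graph)
# tf.image_summary(...)               ->  tf.summary.image(...)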
Example No. 31
def checkinstall():
	if not Process.verifyCommands("convert%ImageMagick"):
		info = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
		                         buttons=gtk.BUTTONS_OK,
		                         message_format="Error al instalar Imagemagick")  # "Error installing ImageMagick"
		info.run()
		info.destroy()
		sys.exit()
Example No. 32
def remove_dupes_from_database(itunes, dupes):
    dupe_files = []
    inverse_dupes = OrderedDict()
    tracks = itunes[0][TRACKS_FIELD]
    for id, deletions in dupes.items():
        good_track = tracks[str(id)]
        for dtrack in deletions:
            delete_id = dtrack[TRACK_ID_FIELD]
            inverse_dupes[delete_id] = id
            good_track[PLAY_COUNT_FIELD] = good_track.get(PLAY_COUNT_FIELD, 0) + dtrack.get(PLAY_COUNT_FIELD, 0)
            del tracks[str(delete_id)]
            dupe_file = Process.get_filename(dtrack)
            if dupe_file:
                dupe_files.append(dupe_file)

    for playlist in itunes[0][PLAYLISTS_FIELD]:
        try:
            items = playlist[ITEMS_FIELD]
        except KeyError:
            # Playlist has no items; skip it.
            continue
        for track in items:
            replacement = inverse_dupes.get(track[TRACK_ID_FIELD])
            if replacement is not None:
                track[TRACK_ID_FIELD] = replacement
    return dupe_files
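For reference, the shape of the `dupes` argument the loop above implies: a mapping from the track id to keep to the list of duplicate track dicts to delete. The literal field names are assumptions based on the usual iTunes library keys:

dupes = {
    101: [{'Track ID': 202, 'Play Count': 3}],    # keep 101, fold 202's play count into it
    105: [{'Track ID': 301}, {'Track ID': 302}],  # keep 105, drop both duplicates
}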
Example No. 33
    def run(self):
        self.setUp()
        process = Process()
        ret = process.run("./fbuilder -c ../../conf/fbuilder.xml")
        if ret[2]:
            print ("fbuilder FAILED.")
            return False

        ret = process.run("./fxist4c -s ../../examples/data/plain_utf8_index" " -e query=BODY:2 -l ../../sysdata")
        open("output.txt", "a").write(ret[0])
        if ret[2]:
            print ("fxist4c FAILED.")
            return False

        self.tearDown()
        return True
Example No. 34
def evaluate():
  with tf.Graph().as_default() as g:
    images, labels = Process.eval_inputs()
    forward_propgation_results = Process.forward_propagation(images)
    init_op = tf.initialize_all_variables()
    saver = tf.train.Saver()
    top_k_op = tf.nn.in_top_k(forward_propgation_results, labels, 1)

  with tf.Session(graph = g) as sess:
    tf.train.start_queue_runners(sess = sess)
    sess.run(init_op)
    saver.restore(sess, eval_dir)
    #if ckpt and ckpt.model_checkpoint_path:
      #  saver.restore(sess, ckpt.model_checkpoint_path)

    for i in range(100):
        print(sess.run(top_k_op))
Example No. 35
 def run(self, cmd, warn_only = False, quiet = False, vewy_quiet = False,
         abandon_output = True):
     # Don't use single quotes in `cmd`, this will break and end badly.
     cmd = cmd.format(host = self.hostname)
     cmd = self.prefix(cmd)
     print(self.hostname + ' =>')
     if vewy_quiet:
         # Be vewy, vewy quiet, I'm hunting wabbits.
         print('[command hidden]\n')
         quiet = True
     else:
         print(cmd + '\n')
     cmd = '''ssh -nT '{0}' "bash -c '{1}'"'''.format(self.host, cmd)
     try:
         return Process.run(cmd, quiet, abandon_output)
     except Exception as e:
         if warn_only:
             print(str(e) + '---------- This was only a warning, ' +
                     'it won\'t stop the execution --\n')
             return None
         else:
             raise  # re-raise, preserving the original traceback
Example No. 36
    def sys_get_proc(self, queries, is_local):
        """
        Get system information (CPU, memory, IO read & write)
        for each process PID using psutil module.
        """
        process = {}
        if not is_local:
            return process
        for sq in queries:
            try:
                proc = psutil.Process(sq['pid'])
                p = Process(
                    pid = sq['pid'],
                    database = sq['database'],
                    user = sq['user'],
                    client = sq['client'],
                    duration = sq['duration'],
                    wait = sq['wait'],
                    query = self.clean_str(sq['query']),
                    extras = {}
                    )

                p.setExtra('meminfo',       proc.get_memory_info())
                p.setExtra('io_counters',   proc.get_io_counters())
                p.setExtra('io_time',       time.time())
                p.setExtra('mem_percent',   proc.get_memory_percent())
                p.setExtra('cpu_percent',   proc.get_cpu_percent(interval=0))
                p.setExtra('cpu_times',     proc.get_cpu_times())
                p.setExtra('read_delta',    0)
                p.setExtra('write_delta',   0)
                p.setExtra('io_wait',       self.sys_get_IOW_status(str(proc.status)))
                p.setExtra('psutil_proc',   proc)
                process[p.pid] = p

            except (psutil.NoSuchProcess, psutil.AccessDenied):
                pass
        return process
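The get_* accessors used here are from psutil 1.x; psutil 2.0 renamed them. Against a modern psutil the equivalents would be:

# psutil 1.x (as above)         ->  psutil >= 2.0
# proc.get_memory_info()        ->  proc.memory_info()
# proc.get_io_counters()        ->  proc.io_counters()
# proc.get_memory_percent()     ->  proc.memory_percent()
# proc.get_cpu_percent(0)       ->  proc.cpu_percent(interval=0)
# proc.get_cpu_times()          ->  proc.cpu_times()
# proc.status  (attribute)      ->  proc.status()  (now a method)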
Example No. 37
       
"""
calculate standard deviation
"""
def calStdDeviation(dataDict):
    mean = calMean(dataDict)
    var = 0.0
    for data in dataDict.values():
        var += (data - mean) ** 2
    # Divide by the sample count for the population standard deviation
    # (the original took the sqrt of the raw sum of squared deviations).
    stdDeva = math.sqrt(var / len(dataDict))
    return stdDeva

"""
select data
"""
def selectData(dataList):
    dataDict = {}
    for data in dataList:
        if data[0] not in dataDict:
            dataDict[data[0]] = data[3]
    return dataDict

if __name__ == '__main__':
    dataDict = {}
    dataList = Process.process('data1')
    dataDict = selectData(dataList)      
    print 'calMean(dataDict)'
    print "%.5f" % calMean(dataDict) 
    print 'calMeadian(dataDict)'
    print "%.5f" % calMeadian(dataDict) 
    print 'calStdDeviation(dataDict)'
    print "%.5f" % calStdDeviation(dataDict) 


Example No. 38
#!/usr/bin/python

import itertools
import re
from Process import *

p = Process()

s = "abc__"
t = ''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
blank_positions = []
count_blank = 0
position = 0

collected_lst = []
lst = list(s)

for c in s:
    if (c == "_"):
        count_blank += 1
        blank_positions.append(position)
    position +=1

if (count_blank > 2):
    print "error"
elif (count_blank == 2):
    for char_1 in alphabet:
        lst[blank_positions[0]] = char_1
        for char_2 in alphabet:
Example No. 39
def train():
    with tf.Graph().as_default():

        images, labels = Process.inputs()

        forward_propgation_results = Process.forward_propagation(images)

        cost, train_loss = Process.error(forward_propgation_results, labels)

        image_summary_t = tf.image_summary(images.name, images, max_images=2)

        summary_op = tf.merge_all_summaries()

        init = tf.initialize_all_variables()

        saver = tf.train.Saver()

        sess = tf.InteractiveSession()

        sess.run(init)

        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess=sess)

        train_dir = "/home/zan/Desktop/Neural-Network-Prostate"

        summary_writer = tf.train.SummaryWriter(train_dir, sess.graph)

        for step in xrange(650):
            start_time = time.time()
            print (sess.run([train_loss, cost]))
            duration = time.time() - start_time

            if step % 1 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = "%s: step %d, (%.1f examples/sec; %.3f " "sec/batch)"
                print (format_str % (datetime.now(), step, examples_per_sec, sec_per_batch))

                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # `step % 650 == 650` can never be true; presumably a checkpoint
            # on the final step was intended.
            if (step + 1) == 650:
                checkpoint_path = os.path.join(train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path)
                print "hello"

                sess.close()

                with tf.Session() as sess:

                    eval_images, eval_labels = Process.eval_inputs()

                    evaluation_forward_propagation_results = Process.forward_propagation(eval_images)
                    top_k_op = Process.evaluate(evaluation_forward_propagation_results, eval_labels)

                    for i in range(16):
                        print (sess.run([top_k_op]))
Example No. 40
 def __init__(self, _id):
     Process.__init__(self, _id, os.getpid(), socket.gethostname(),
                      time.time())
     self._listeners = []
     self._knownProcesses = set()
Example No. 41
 def __reduce__(self):
     return addConnectionPossibilities, (R(Process.__reduce__(self)),
                                         self.getConnectionPossibilities())
                + '\'" "'
                + destino
                + '" "'
                + destino
                + '"'
            )
        else:
            nombre = ""

    else:
        alerta(archivo + "\n\nTipo no soportado.", gtk.MESSAGE_WARNING)


# PROGRAMA

if not Process.verifyCommands("convert%ImageMagick"):
    sys.exit()

w = gtk.Window(gtk.WINDOW_TOPLEVEL)
w.set_title("Imagenes Kindle 3")
w.set_border_width(20)

w.connect("destroy", gtk.main_quit)

# table

tableMin = gtk.Table(3, 2, False)
tableMin.set_border_width(10)

tableMin.set_row_spacings(10)
tableMin.set_col_spacings(8)
Example No. 43
 def stopServer(self):
     if (self.serverProcess is not None):
         Process.stopProcess(self.serverProcess)
         self.serverProcess = None
Example No. 44
 def test_string_to_date(self):
     self.assertEqual(Process.stringToDate("2015-03-10 00:47:24"),
                      datetime.datetime(2015, 3, 10, 0, 47, 24))
Example No. 45
def main():
    
    #define new objects
    preprocess = Preprocess()
    process = Process()
    points = Points()
    postprocess = Postprocess()
    
    #declare and initialize variables
    search_string = ''
    option = 0
    count2 = 0      #delete this
    reordered_search_string = ''
    permutation_set = set()
    temp_permutation_set = set()
    permutation_list = []     #2D list
    blank_permutation_list = []
    filtered_content = []
    sorted_results = []
    final_results = []
    sorted_final_results = []
    

    #menu options
    print "\nSearch options:\n"
    print "1. Search for words" 
    print "2. Search for words starting with"
    print "3. Search for words ending with"
    print "4. Search for words containing"
    print "5. Search with blank tiles (use the underscore character to represent blanks)\n"
    #option = int(raw_input("Choose option:"))
    option = 1
    #search_string = raw_input('Please input tiles for search: ').lower()
    search_string = "andrew"
    
    #basic input check
    if (preprocess.checkInput(search_string)):
        reordered_search_string = preprocess.reorderString(search_string) #alphabetize tiles
    else:
        sys.exit()

    t1 = time.time()    #diagnostics
    #Input(search_string, option)    #turned into function for testing purposes
    if (option == 0):   #no option chosen
        print "ERROR: No option chosen, exiting."
        sys.exit()
    elif(option == 1):
        print "Searching for words...\n"
        permutation_list = process.stringPermutations(reordered_search_string)
        filtered_content = process.collectDictionarySegments(reordered_search_string)
        sorted_results = process.findWords(permutation_list, filtered_content)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 2):
        print "Searching for words starting with: ", search_string, "\n"
        filtered_content = process.collectDictionarySegments(search_string[0])  #get first letter in the word being searched
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 3):
        print "Searching for words ending in: ", search_string, "\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        filtered_content = process.collectDictionarySegments(alphabet)
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 4):
        print "Searching for words containing: ", search_string, "\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        filtered_content = process.collectDictionarySegments(alphabet)
        sorted_results = process.findWordsContaining(search_string, filtered_content, option)
        final_results = points.associatePointScore(sorted_results)
    elif(option == 5):
        print "Searching with blank tiles...\n"
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        blank_permutation_list = process.blankTileProcessing(reordered_search_string)        
        filtered_content = process.collectDictionarySegments(alphabet)
        
        #TO DO: Creates a 2D list, gotta convert to 1D list - DONE
        #TO DO: find way to use union keyword to take out duplicates, it will take care of one nested for loop in findWords function - DONE
        #TO DO: Do another union - DONE
            # time vs duplication trade off. Takes longer to take out the duplicates with the union
        for blank_permutation_string in blank_permutation_list:
            #permutation_list.extend(process.stringPermutations(blank_permutation_string))
            temp_permutation_set = set(process.stringPermutations(blank_permutation_string))
            permutation_set = permutation_set.union(temp_permutation_set)
        permutation_list = list(permutation_set)
        
        sorted_results = process.findWords(permutation_list, filtered_content)
        final_results = points.associatePointScore(sorted_results)
    else:
        print "ERROR: Please choose an option between 1-5"
        sys.exit()
    t2 = time.time() - t1   #diagnostics
    
    sorted_option = 0
    print "Results found and processed. Sort results by...\n"
    print "1. Points - lowest to highest"
    print "2. Points - highest to lowest"
    print "3. Length - longest to shortest"
    print "4. Length - shortest to longest"
    sorted_option = int(raw_input("choose option: "))
    print "Option", sorted_option, "chosen"
    
    if (sorted_option == 1):
        print "Sorting results by points, highest to lowest\n"
        sorted_final_results = postprocess.resultsByPoints(final_results)
    elif (sorted_option == 2):
        print "Sorting results by points, lowest to highest\n"
        sorted_final_results = postprocess.resultsByPointsReverse(final_results)
    elif (sorted_option == 3):
        print "Sorting results by length, longest to shortest\n"
        sorted_final_results = postprocess.resultsByLength(final_results)
    elif (sorted_option == 4):
        print "Sorting results by length, shortest to longest\n"
        sorted_final_results = postprocess.resultsByLengthReverse(final_results)
    else:
        print "Option 1-4 not chosen, outputting results by default order"
        sorted_final_results = final_results
        
    Output(sorted_final_results, t2)
Example No. 46

#!/usr/bin/python
import commands
from Process import *
from Info import *

print ("PYGR04 - Help machines to control!\n")

processes = commands.getoutput("ps aux").split("\n")
del processes[0]

psaux = []

for process in processes:
    columns = " ".join(process.split()).split(" ")

    new = Process()
    new.user = columns[0]
    new.pid = columns[1]
    new.cpu = columns[2]
    new.mem = columns[3]
    new.vsz = columns[4]
    new.rss = columns[5]
    new.tty = columns[6]
    new.stat = columns[7]
    new.start = columns[8]
    new.time = columns[9]
    new.command = columns[10]

    psaux.append(new)

print ("There are %d active processes" % (len(psaux)))
Example No. 47

def ProcesarImagen(archivo):
    if (
        Process.getExtension(archivo) == "JPG"
        or Process.getExtension(archivo) == "PNG"
        or Process.getExtension(archivo) == "BMP"
    ):

        # Build the output file name
        destino = Process.getPath(archivo) + "/kindle_" + Process.getName(archivo) + ".png"

        # Rotate if needed to keep the best resolution; we also work with PNG files
        pb = gtk.gdk.pixbuf_new_from_file(archivo)
        if pb.get_height() < pb.get_width():
            opcGiro = "-rotate -90"
        else:
            opcGiro = ""

        Process.ProcessFileByArgument("convert " + opcGiro + " " + archivo + " " + destino)

        # Convert to grayscale and fit the size
        Process.ProcessFileByArgument(
            "convert -colorspace Gray -resize 550x750> -gravity center -background Gray -extent 550x750 "
            + destino
            + " "
            + destino
        )

        # Add a black border to fill out 600x800
        Process.ProcessFileByArgument("convert -border 25x25 -bordercolor black " + destino + " " + destino)

        # Add text centered at the bottom, with letter spacing
        titulo = "Slide and release the power switch to wake"
        Process.ProcessFileByArgument(
            "convert -kerning 2 -font Ubuntu-Regular -fill white -pointsize 20 -draw \"gravity south text 0,0 '"
            + titulo
            + '\'" "'
            + destino
            + '" "'
            + destino
            + '"'
        )

        # Add the credit line vertically
        credito = entryName.get_text()
        Process.ProcessFileByArgument(
            "convert -font Ubuntu-Regular -fill white -pointsize 12 -draw \"translate 590,150 rotate -90 text 0,0 '"
            + credito
            + '\'" "'
            + destino
            + '" "'
            + destino
            + '"'
        )

        # Add the image title vertically
        nombre = Process.getName(archivo)
        if cbNomFic.get_active():
            Process.ProcessFileByArgument(
                "convert -font Ubuntu-Regular -fill white -pointsize 15 -draw \"translate 20,775 rotate -90 text 0,0 '"
                + nombre
                + '\'" "'
                + destino
                + '" "'
                + destino
                + '"'
            )
        else:
            nombre = ""

    else:
        alerta(archivo + "\n\nTipo no soportado.", gtk.MESSAGE_WARNING)  # "Unsupported type."
Example No. 48
finished = 0
print("Rut Checker")
start = int(input("Range Start:"))
end = int(input("Range End:"))
print("Cracking captcha key...                " + '\r', end="")
updateKey()
start_time = time.time()
sys.stdout.flush()
print("Starting...              " + '\r', end="")
for rut in range(start, end):
    if Uploader.checkrut(rut) == 0:
        print("Doing missing rut "+str(rut))
        data = Extractor.http_request(rut,key)
        if data==0:
            print("Rut "+str(rut)+" timed out.")
            updatePercentage(rut)
        else:
            if Process.validate(data) == 1:
                key = Keymaker.generate()
                time.sleep(5)
                print("Rut "+str(rut)+" broken key.")
            else:
                Uploader.updata(rut,Process.process(data))
    updatePercentage(rut)

Example No. 49
 def __str__(self):
     return 'This' + Process.__str__(self)
Example No. 50
 def startServer(self):
     self.stopServer()
     self.serverProcess = Process.start(self.server)
Example No. 51
def main(args=None):
	rerun = []
	args = Parse.get_args(Parse.get_parser())
	resubmit = False
	if args.which in ['snv','snvgroup','meta','merge','resubmit','tools']:
		if args.which == 'resubmit':
			with open(args.dir + '/' + os.path.basename(args.dir) + '.args.pkl', 'rb') as p:
				qsub = args.qsub if args.qsub else None
				args,cfg = pickle.load(p)
				if qsub:
					cfg['qsub'] = qsub
			with open(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.rerun', 'r') as f:
				rerun = [int(line.rstrip()) for line in f]
			cfg['replace'] = True
			resubmit = True
		else:
			cfg = getattr(Parse, 'generate_' + args.which + '_cfg')(args.ordered_args)
	elif args.which != 'settings':
		cfg = getattr(Parse, 'generate_' + args.which + '_cfg')(args.ordered_args)

	##### read settings file #####
	ini = SafeConfigParser()
	ini.read(resource_filename('uga', 'settings.ini'))

	##### locate qsub wrapper #####
	qsub_wrapper = ini.get('main','wrapper')
	if 'qsub' in args and not os.access(ini.get('main','wrapper'),os.X_OK):
		print Process.print_error('uga qsub wrapper ' + ini.get('main','wrapper') + ' is not executable')
		return

	##### distribute jobs #####
	if args.which in ['snv','snvgroup','meta','merge','tools']:
		run_type = 0
		if cfg['cpus'] is not None and cfg['cpus'] > 1:
			run_type = run_type + 1
		if cfg['split'] and cfg['qsub'] is not None:
			run_type = run_type + 10
		if cfg['split_n'] and cfg['qsub'] is not None:
			run_type = run_type + 100
			
		if resubmit:
			jobs_df = pd.read_table(cfg['out'] + '/' + cfg['out'] + '.jobs')
		else:
			if args.which in ['snv','tools']:
				#	generate regions dataframe with M rows, either from --snv-map or by splitting data file or --snv-region according to --mb
				#	run_type = 0:   run as single job
				#	run_type = 1:   --cpus C (distribute M regions over C cpus and run single job, 1 job C cpus)
				#	run_type = 10:  --split (split M regions into single region jobs, M jobs 1 cpu)
				#	run_type = 100: --split-n N (distribute M regions over N jobs, N jobs 1 cpu)
				#	run_type = 11:  --split, --cpus C (split M regions into chunks of size M / C and run M jobs, M jobs C cpus)
				#	run_type = 101: --split-n N, --cpus C (distribute M regions over N jobs and distribute each over C cpus, N jobs C cpus)
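				#	worked example (illustrative, not from the original): with M = 1000
				#	regions, --split-n 40 --cpus 8 under qsub gives run_type = 100 + 1 = 101,
				#	i.e. 40 array tasks of 25 regions each, each task spread over 8 cpus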

				if cfg['region_file']:
					jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
					jobs_df['chr'] = [x.split(':')[0] for x in jobs_df['region']]
					jobs_df['chr_idx'] = [int(x.split(':')[0].replace('X','23').replace('Y','24')) for x in jobs_df['region']]
					jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
					jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
				else:
					snv_map = []
					data_files = []
					if args.which == 'snv':
						for m in cfg['models']:
							if cfg['models'][m]['file'] not in data_files:
								snv_map.extend(Map.map(file=cfg['models'][m]['file'], mb = cfg['mb'], region = cfg['region']))
								data_files.append(cfg['models'][m]['file'])
					else:
						snv_map.extend(Map.map(file=cfg['file'], mb = cfg['mb'], region = cfg['region']))
					snv_map = list(set(snv_map))
					jobs_df = pd.DataFrame({'region': snv_map, 'chr': [x.split(':')[0] for x in snv_map], 'chr_idx': [int(x.split(':')[0].replace('X','23').replace('Y','24')) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
					del data_files
					del snv_map
				jobs_df.sort_values(by=['chr_idx','start'],inplace=True)
				jobs_df = jobs_df[['chr','start','end','region','job','cpu']]
				jobs_df.reset_index(drop=True,inplace=True)
			if args.which in ['meta','merge']:
				#	generate regions dataframe with M rows, either from --snv-map or by splitting data file or --snv-region according to --mb
				#	run_type = 0:   run as single job
				#	run_type = 1:   --cpus C (distribute M regions over C cpus and run single job, 1 job C cpus)
				#	run_type = 10:  --split (split M regions into single region jobs, M jobs 1 cpu)
				#	run_type = 100: --split-n N (distribute M regions over N jobs, N jobs 1 cpu)
				#	run_type = 11:  --split, --cpus C (split M regions into chunks of size M / C and run M jobs, M jobs C cpus)
				#	run_type = 101: --split-n N, --cpus C (distribute M regions over N jobs and distribute each over C cpus, N jobs C cpus)
				if cfg['region_file']:
					jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
					jobs_df['chr'] = [int(x.split(':')[0]) for x in jobs_df['region']]
					jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
					jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
				else:
					snv_map = []
					data_files = []
					for f in cfg['files']:
						if f not in data_files:
							snv_map.extend(Map.map(file=cfg['files'][f], mb = cfg['mb'], region = cfg['region']))
							data_files.append(cfg['files'][f])
					snv_map = list(set(snv_map))
					jobs_df = pd.DataFrame({'region': snv_map, 'chr': [int(x.split(':')[0]) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
					del data_files
					del snv_map
				jobs_df = jobs_df[['chr','start','end','region','job','cpu']]
				jobs_df.sort_values(by=['chr','start'],inplace=True)
				jobs_df.reset_index(drop=True,inplace=True)

			if args.which == 'snvgroup':
				#	generate regions dataframe with M rows from --snvgroup-map
				#	run_type = 0:   run as single job
				#	run_type = 1:   --cpus C (distribute M snvgroups over C cpus and run single job, 1 job C cpus)
				#	run_type = 10:  --split (split M snvgroups into single region jobs, M jobs 1 cpu)
				#	run_type = 100: --split-n N (distribute M snvgroups over N jobs, N jobs 1 cpu)
				#	run_type = 101: --split-n N, --cpus C (distribute M snvgroups over N jobs and distribute each job over C cpus, N jobs C cpus)

				if cfg['region_file']:
					jobs_df = pd.read_table(cfg['region_file'],header=None,names=['region','group_id'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
					jobs_df['chr'] = [int(x.split(':')[0]) for x in jobs_df['region']]
					jobs_df['chr_idx'] = 1
					jobs_df['start'] = [int(x.split(':')[1].split('-')[0]) for x in jobs_df['region']]
					jobs_df['end'] = [int(x.split(':')[1].split('-')[1]) for x in jobs_df['region']]
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
					jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
					jobs_df.sort_values(by=['chr','start'],inplace=True)
					jobs_df.reset_index(drop=True,inplace=True)
				elif cfg['region']:
					snv_map = []
					data_files = []
					for m in cfg['models']:
						if cfg['models'][m]['file'] not in data_files:
							snv_map.extend(Map.map(file=cfg['models'][m]['file'], mb = 1000, region = cfg['region']))
							data_files.append(cfg['models'][m]['file'])
					snv_map = list(set(snv_map))
					jobs_df = pd.DataFrame({'region': snv_map, 'chr': [int(x.split(':')[0]) for x in snv_map], 'start': [int(x.split(':')[1].split('-')[0]) for x in snv_map], 'end': [int(x.split(':')[1].split('-')[1]) for x in snv_map]})
					jobs_df['group_id'] = cfg['region']
					jobs_df['job'] = 1
					jobs_df['cpu'] = 1
					del data_files
					del snv_map
					jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
					jobs_df.sort_values(by=['chr','start'],inplace=True)
					jobs_df.reset_index(drop=True,inplace=True)
				else:
					if cfg['snvgroup_map']:
						snvgroup_map = pd.read_table(cfg['snvgroup_map'],header=None,names=['chr','pos','marker','group_id'], compression='gzip' if cfg['snvgroup_map'].split('.')[-1] == 'gz' else None)
						jobs_df = snvgroup_map[['chr','pos','group_id']]
						jobs_df=jobs_df.groupby(['chr','group_id'])
						jobs_df = jobs_df.agg({'pos': [np.min,np.max]})
						jobs_df.columns = ['start','end']
						jobs_df['chr'] = jobs_df.index.get_level_values('chr')
						jobs_df['group_id'] = jobs_df.index.get_level_values('group_id')
						jobs_df['region'] = jobs_df.chr.map(str) + ':' + jobs_df.start.map(str) + '-' + jobs_df.end.map(str)
						jobs_df['job'] = 1
						jobs_df['cpu'] = 1
						jobs_df = jobs_df[['chr','start','end','region','group_id','job','cpu']]
						jobs_df.drop_duplicates(inplace=True)
						jobs_df.sort_values(by=['chr','start'],inplace=True)
						jobs_df.reset_index(drop=True,inplace=True)

			if jobs_df.empty:
				print Process.print_error('job list is empty, no variants found in region/s specified')
				return
			if run_type == 1:
				n = int(np.ceil(jobs_df.shape[0] / float(cfg['cpus'])))
				n_remain = int(jobs_df.shape[0] - (n-1) * cfg['cpus'])
				jobs_df['cpu'] = np.append(np.repeat(range(cfg['cpus'])[:n_remain],n),np.repeat(range(cfg['cpus'])[n_remain:],n-1)).astype(np.int64) + 1
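				# illustrative example (not in the original): 10 regions over --cpus 3
				# gives n = 4, n_remain = 1, hence cpu = [1,1,1,1,2,2,2,3,3,3]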
			elif run_type == 10:
				jobs_df['job'] = jobs_df.index.values + 1
			elif run_type == 100:
				n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
				n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
				jobs_df['job'] = np.append(np.repeat(range(cfg['split_n'])[:n_remain],n),np.repeat(range(cfg['split_n'])[n_remain:],n-1)).astype(np.int64) + 1
			elif run_type == 11 and args.which != 'snvgroup':
				cfg['split_n'] = int(np.ceil(jobs_df.shape[0] / float(cfg['cpus'])))
				n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
				n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
				jobs_df['job'] = np.append(np.repeat(range(cfg['split_n'])[:n_remain],n),np.repeat(range(cfg['split_n'])[n_remain:],n-1)).astype(np.int64) + 1
				for i in range(1,int(max(jobs_df['job'])) + 1):
					n = int(np.ceil(jobs_df[jobs_df['job'] == i].shape[0] / float(cfg['cpus'])))
					n_remain = int(jobs_df[jobs_df['job'] == i].shape[0] - (n-1) * cfg['cpus'])
					jobs_df.loc[jobs_df['job'] == i,'cpu'] = np.append(np.repeat(range(cfg['cpus'])[:n_remain],n),np.repeat(range(cfg['cpus'])[n_remain:],n-1)).astype(np.int64) + 1
				cfg['split'] = None
			elif run_type == 101:
				n = int(np.ceil(jobs_df.shape[0] / float(cfg['split_n'])))
				n_remain = int(jobs_df.shape[0] - (n-1) * cfg['split_n'])
				jobs_df['job'] = np.append(np.repeat(range(cfg['split_n'])[:n_remain],n),np.repeat(range(cfg['split_n'])[n_remain:],n-1)).astype(np.int64) + 1
				for i in range(1,int(max(jobs_df['job'])) + 1):
					n = int(np.ceil(jobs_df[jobs_df['job'] == i].shape[0] / float(cfg['cpus'])))
					n_remain = int(jobs_df[jobs_df['job'] == i].shape[0] - (n-1) * cfg['cpus'])
					jobs_df.loc[jobs_df['job'] == i,'cpu'] = np.append(np.repeat(range(cfg['cpus'])[:n_remain],n),np.repeat(range(cfg['cpus'])[n_remain:],n-1)).astype(np.int64) + 1
			if int(max(jobs_df['job'])) + 1 > 100000:
				print Process.print_error('number of jobs exceeds 100,000, consider using --split-n to reduce the total number of jobs')
				return
			

	if args.which in ['snv','snvgroup','meta','merge','tools']:
		print 'detected run type ' + str(run_type) + ' ...'
		if len(rerun) == 0:
			if int(max(jobs_df['job'])) > 1 and cfg['qsub'] is not None:
				if 'mb' in cfg:
					print '   ' + str(jobs_df.shape[0]) + ' regions of size ' + str(cfg['mb']) + 'mb detected'
				else:
					print '   ' + str(jobs_df.shape[0]) + ' regions detected'
				print '   an array containing ' + str(int(max(jobs_df['job']))) + ' tasks will be submitted'
				print '   <= ' + str(max(np.bincount(jobs_df['job']))) + ' regions per task'
				print '   <= '  + str(int(max(jobs_df['cpu']))) + ' cpus per task'
				print '   qsub options: ' + cfg['qsub']
				print '   output directory: ' + cfg['out']
				print '   replace: ' + str(cfg['replace'])
				input_var = None
				while input_var not in ['y','n','Y','N']:
					input_var = raw_input('\nsubmit jobs (yY/nN)? ')
				if input_var.lower() == 'n':
					print 'canceled by user'
					return

			if os.path.exists(cfg['out']):
				if args.replace:
					print 'deleting old data'
					try:
						shutil.rmtree(cfg['out'])
					except OSError:
						print Process.print_error('unable to replace results directory ' + cfg['out'])
				else:
					print Process.print_error('results directory ' + cfg['out'] + ' already exists, use --replace to overwrite existing results')
					return
			try:
				os.mkdir(cfg['out'])
			except OSError:
				pass

			with open(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.args.pkl', 'wb') as p:
				pickle.dump([args, cfg], p)

			if run_type in [10,11,100,101] and jobs_df.shape[0] > 1:
				print "initializing job array database ..."
				try:
					os.mkdir(cfg['out'] + '/temp')
				except OSError:
					pass
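				# jobs are bucketed into directories of 100 (note the Python 2 integer
				# division): e.g. j = 137 gives 100 * ((137-1) / 100) + 1 = 101, so the
				# job lands in jobs101-200/job137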
				for j in range(1, int(max(jobs_df['job'])) + 1):
					try:
						os.mkdir(cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100))
					except OSError:
						pass
					try:
						os.mkdir(cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100) + '/job' + str(j))
					except OSError:
						pass
				with open(cfg['out'] + '/' + cfg['out'] + '.files', 'w') as jlist:
					for j in range(1, int(max(jobs_df['job'])) + 1):
						if args.which in ['snv','snvgroup','tools','merge']:
							if 'model_order' in cfg:
								for m in cfg['model_order']:
									if m != '___no_tag___':
										jlist.write(str(j) + '\t' + cfg['out'] + '.' + m + '.gz' + '\t' + cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.' + m + '.gz\n')
									else:
										jlist.write(str(j) + '\t' + cfg['out'] + '.gz' + '\t' + cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.gz\n')
							else:								
								jlist.write(str(j) + '\t' + cfg['out'] + '.gz' + '\t' + cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.gz\n')
						if 'meta_order' in cfg:
							if len(cfg['meta_order']) > 0:
								for m in cfg['meta_order']:
									jlist.write(str(j) + '\t' + cfg['out'] + '.' + m + '.gz' + '\t' + cfg['out'] + '/jobs' + str(100 * ((j-1) / 100) + 1) + '-' + str(100 * ((j-1) / 100) + 100) + '/job' + str(j) + '/' + cfg['out'] + '.job' + str(j) + '.' + m + '.gz\n')
			jobs_df.to_csv(cfg['out'] + '/' + cfg['out'] + '.jobs',header=True,index=False,sep="\t")
			with open(cfg['out'] + '/' + cfg['out'] + '.jobs.run','w') as f:
				f.write("\n".join([str(x) for x in jobs_df['job'].unique()]))
		else:
			if len(rerun) > 0 and cfg['qsub'] is not None:
				print 'detected resubmit ...'
				print '   an array containing ' + str(len(rerun)) + ' tasks will be submitted'
				print '   <= ' + str(max(np.bincount(jobs_df['job']))) + ' regions per job'
				print '   <= '  + str(int(max(jobs_df['cpu']))) + ' cpus per job'
				print '   qsub options: ' + cfg['qsub']
				print '   output directory: ' + cfg['out']
				print '   replace: ' + str(cfg['replace'])
				input_var = None
				while input_var not in ['y','n','Y','N']:
					input_var = raw_input('\nresubmit jobs (yY/nN)? ')
				if input_var.lower() == 'n':
					print 'canceled by user'
					return
			with open(cfg['out'] + '/' + cfg['out'] + '.jobs.run','w') as f:
				f.write("\n".join([str(x) for x in jobs_df['job'][jobs_df['job'].isin(rerun)]]))
			os.remove(cfg['out'] + '/' + os.path.basename(cfg['out']) + '.rerun')

	if args.which == 'settings':
		if 'ordered_args' in args:
			for k in args.ordered_args:
				ini.set('main',k[0],k[1])
			with open(resource_filename('uga', 'settings.ini'), 'w') as f:
				ini.write(f)
		print 'main settings ...'
		for s in ini.sections():
			for k in ini.options(s):
				print '   ' + k + ' = ' + ini.get(s,k)

	elif args.which in ['snv','snvgroup','meta','merge','resubmit','tools']:
		if cfg['qsub']:
			print "submitting jobs\n"
		out = cfg['out']
		joblist = range(1, int(max(jobs_df['job'])) + 1) if len(rerun) == 0 else rerun
		if int(max(jobs_df['job'])) > 1:
			cfg['out'] = out + '/jobsUGA_JOB_RANGE/jobUGA_JOB_ID/' + os.path.basename(out) + '.jobUGA_JOB_ID'
			cfg['job'] = 'UGA_JOB_ID'
			if cfg['qsub']:
				cfg['qsub'] = cfg['qsub'] + ' -t 1-' + str(len(joblist))
		else:
			cfg['out'] = out + '/' + os.path.basename(out)
			cfg['job'] = 1
			if cfg['qsub']:
				cfg['qsub'] = cfg['qsub'] + ' -t 1'
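		# UGA_JOB_RANGE and UGA_JOB_ID appear to be placeholders substituted per
		# array task by the qsub wrapper (an assumption based on context)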
		args.ordered_args = [('out',cfg['out']),('region_file',out + '/' + out + '.jobs'),('job',cfg['job']),('cpus',int(max(jobs_df['cpu'])))] + [x for x in args.ordered_args if x[0] not in ['out','region_file','cpus']]
		cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
		if cfg['qsub']:
			Process.qsub(['qsub'] + cfg['qsub'].split() + ['-N',out,'-o',out + '/temp',qsub_wrapper],'\"' + cmd + '\"',out + '/' + out + '.jobs.run',cfg['out'] + '.log')
		else:
			Process.interactive(qsub_wrapper, cmd, cfg['out'] + '.' + args.which + '.log')

	elif args.which == 'compile':
		files = pd.read_table(args.dir + '/' + os.path.basename(args.dir) + '.files', names=['job','out','file'])
		complete, rerun = Fxns.verify_results(args.dir,files)
		if len(rerun) > 0:
			print Process.print_error('detected ' + str(len(rerun)) + ' failed jobs\n       use resubmit module to rerun failed jobs')
			with open(args.dir + '/' + os.path.basename(args.dir) + '.rerun', 'w') as f:
				f.write("\n".join([str(x) for x in rerun]))
		else:
			complete = Fxns.compile_results(args.dir,files)
			if complete:
				input_var = None
				while input_var not in ['y','n','Y','N']:
					input_var = raw_input('delete obsolete job subdirectories and files for this project (yY/nN)? ')
				if input_var.lower() == 'n':
					print 'canceled by user'
				else:
					print 'deleting subdirectories'
					for d in glob.glob(args.dir + '/jobs*-*'):
						try:
							shutil.rmtree(d)
						except OSError:
							print Process.print_error('unable to delete job data directory ' + d)
					print 'deleting temporary directory'
					try:
						shutil.rmtree(args.dir + '/temp')
					except OSError:
						print Process.print_error('unable to delete temporary directory ' + args.dir + '/temp')
					print "deleting last job run list"
					try:
						os.remove(args.dir + '/' + os.path.basename(args.dir) + '.jobs.run')
					except OSError:
						print Process.print_error('unable to delete job run list ' + args.dir + '/' + os.path.basename(args.dir) + '.jobs.run')
			else:
				print Process.print_error('file compilation incomplete')

	elif args.which in ['snvgroupplot','snvplot']:
		cfg['out'] = '.'.join(cfg['file'].split('.')[0:len(cfg['file'].split('.'))-1]) + '.' + args.which
		args.ordered_args = [('out',cfg['out'])] + [x for x in args.ordered_args if x[0] not in ['out']]
		cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
		if cfg['qsub'] is not None:
			Process.qsub(['qsub'] + cfg['qsub'].split() + ['-o',cfg['out'] + '.log',qsub_wrapper],'\"' + cmd + '\"')
		else:
			Process.interactive(qsub_wrapper, cmd, cfg['out'] + '.log')

	elif args.which == 'filter':
		if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.log')):
			if args.replace:
				try:
					os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'))
				except OSError:
					print Process.print_error('unable to remove existing log file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'))
					return
			else:
				print Process.print_error('log file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.log') + ' already exists, use --replace to overwrite existing results')
				return
		if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz')):
			if args.replace:
				try:
					os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz'))
				except OSError:
					print Process.print_error('unable to remove existing inflation corrected results file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz'))
			else:
				print Process.print_error('results file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz') + ' already exists, use --replace to overwrite existing results')
				return
		if os.path.exists(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi')):
			if args.replace:
				try:
					os.remove(cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi'))
				except OSError:
					print Process.print_error('unable to remove existing inflation corrected results index file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi'))
			else:
				print Process.print_error('results index file ' + cfg['file'].replace('.gz','.' + cfg['tag'] + '.gz.tbi') + ' already exists, use --replace to overwrite existing results')
				return
		cmd = 'Run' + args.which.capitalize() + '(' + str(args.ordered_args) + ')'
		if cfg['qsub'] is not None:
			Process.qsub(['qsub'] + cfg['qsub'].split() + ['-o',cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'),qsub_wrapper],'\"' + cmd + '\"')
		else:
			Process.interactive(qsub_wrapper, cmd, cfg['file'].replace('.gz','.' + cfg['tag'] + '.log'))
	else:
		print Process.print_error(args.which + " not a currently available module")

	print ''