Beispiel #1
1
def tkinter_clipboard_get():
    """ Get the clipboard's text using Tkinter.

    This is the default on systems that are not Windows or OS X. It may
    interfere with other UI toolkits and should be replaced with an
    implementation that uses that toolkit.
    """
    try:
        from tkinter import Tk, TclError  # Py 3
    except ImportError:
        try:
            from Tkinter import Tk, TclError  # Py 2
        except ImportError:
            # TryNext: project exception (hook-chaining style) -- lets the
            # caller fall back to another clipboard implementation.
            raise TryNext("Getting text from the clipboard on this platform "
                          "requires Tkinter.")
    # A withdrawn root window is enough for clipboard access; no UI shows.
    root = Tk()
    root.withdraw()
    try:
        text = root.clipboard_get()
    except TclError:
        # Tk raises TclError when the clipboard holds no text selection.
        raise ClipboardEmpty
    finally:
        # Tear the hidden window down even when clipboard_get fails.
        root.destroy()
    # py3compat.cast_unicode: project helper that decodes bytes to unicode
    # using the platform default encoding.
    text = py3compat.cast_unicode(text, py3compat.DEFAULT_ENCODING)
    return text
Beispiel #2
0
def file_chooser():
	"""Prompt the user to pick one or more files.

	Returns the tuple of selected paths, or the single path string when
	exactly one file was chosen.  Returns an empty tuple on cancel.
	"""
	root = Tk()
	root.withdraw()  # hide the bare root window behind the dialog
	files = filedialog.askopenfilenames(title='Choose files')
	# Bug fix: the original stored the result on root.files but then tested
	# an undefined local name `files`, raising NameError before returning.
	if len(files) == 1:
		files = files[0]
	root.destroy()  # the original leaked the hidden root window
	return files
Beispiel #3
0
def downloadplaylist(url):
	"""Download every video of a pafy playlist into a user-chosen folder.

	url -- a playlist URL understood by pafy.get_playlist().
	"""
	print("Give a download location")
	root = Tk()
	root.withdraw()  # only the directory-picker dialog should be visible
	filename = filedialog.askdirectory()
	playlist = pafy.get_playlist(url)
	# Bug fix: the original `while i < playlen` loop never incremented i,
	# re-downloading the first item forever; iterate the items directly.
	for entry in playlist['items']:
		pafyobj = entry['pafy']
		aobj = pafyobj.getbest()
		print("---------------------------------------------------------")
		print("Now Downloading: "+pafyobj.title)
		# Bug fix: get_filesize is a method -- the original printed the
		# bound-method object instead of the size.
		print("Size is: %s" % aobj.get_filesize())
		print("Video Resolution is: "+aobj.resolution)
		aobj.download(filepath=filename,quiet=False)

	print("Videos downloaded at: "+filename)
	print("Do you want to open the download location?\n1.Yes\n2.No")
	ch = int(input())
	if ch == 1:
		os.startfile(filename)
	else:
		print("Thank you for using this script!")
		print("EAT................SLEEP..................CODE...................REPEAT")
Beispiel #4
0
	def get():
		"""Return the text held in the CLIPBOARD selection, or '' if unset."""
		widget = Tk()
		widget.withdraw()
		content = widget.selection_get(selection = "CLIPBOARD")
		widget.destroy()
		return content if content else ''
def save():
     """Serialise the current Point/Stick scene to a user-chosen JSON file.

     Does nothing when the save dialog is cancelled.
     """
     root = Tk()
     root.withdraw()
     filename = filedialog.asksaveasfilename( filetypes = [("JSON files", ".json"),("All files", "*")],defaultextension='.json')
     if filename:
          d = {'points': [], 'lines': []}
          # Point.points[0] is skipped -- presumably a sentinel entry; this
          # mirrors the original 1-based index loop (TODO confirm).
          for p in Point.points[1:]:
               pd = {
                    'pos': {'x': p.pos.x, 'y': p.pos.y},
                    'pinned': p.pinned,
                    'size': p.size,
                    'isMotor': p.isMotor,
                    # spinPos may be None; only expand it to a dict when set.
                    'spinPos': None if p.spinPos is None else {'x': p.spinPos.x, 'y': p.spinPos.y},
                    'spinSpeed': p.spinSpeed,
                    'currentSpin': p.currentSpin
                    }
               d['points'].append(pd)
          for s in Stick.sticks:
               sd = {
                    # sticks are stored as indices into Point.points
                    'p1': save_helper_point_finder(s.p1),
                    'p2': save_helper_point_finder(s.p2),
                    'visible': s.visible
                    }
               d['lines'].append(sd)
          # Bug fix: use a with-statement so the file handle is closed even
          # if jsonDump raises (the original leaked the handle on error).
          with open(filename, 'w') as f:
               f.write(jsonDump(d, indent = 4))
Beispiel #6
0
	def add(str): # TODO: for linux adds some bad data, that breaks program (breaks even editor), but for windows worked
		"""Copy *str* to the system clipboard (Windows only).

		Note: the parameter name shadows the builtin ``str``; kept for
		interface compatibility with existing callers.
		"""
		if _platform == "win32" or _platform == "win64":
			r = Tk()
			r.withdraw()
			r.clipboard_clear()
			# Bug fix: the original appended a hard-coded placeholder
			# ('i can has clipboardz?') and ignored the argument.
			r.clipboard_append(str)
			r.destroy()
        class SystemNameClip(object):
            """
            Cross-platform helper that copies system names into the
            clipboard so they can be pasted into the E:D galaxy search.
            Names are forced to lower case because E:D's search does not
            match upper-case characters, and "(fix)"/"(fixed)" suffixes
            are removed.
            """

            def __init__(self):
                # A withdrawn Tk root gives clipboard access with no window.
                self.tkroot = Tk()
                self.tkroot.withdraw()

            def strip(self, text, trailing):
                """Remove *trailing* from the end of *text*, if present."""
                if not text.endswith(trailing):
                    return text
                return text[:-len(trailing)].strip()

            def copy_text(self, text):
                """ Copies the specified text into the clipboard. """
                cleaned = text.lower().strip()
                for marker in ("(fix)", "(fixed)"):
                    cleaned = self.strip(cleaned, marker)
                self.tkroot.clipboard_clear()
                self.tkroot.clipboard_append(cleaned.lower())
def load():
     """Load a Point/Stick scene from a user-chosen JSON file.

     On any parse/shape failure, shows an error box and restores the
     previous scene from backups taken before the lists were cleared.
     """
     root = Tk()
     root.withdraw()
     root.filename =  filedialog.askopenfilename(title = "choose your file",
                  filetypes = [("JSON files", ".json"),("All files", "*")])
     # Backups of the current scene; restored if loading fails below.
     pbackup = None
     sbackup = None
     with open(root.filename, 'r') as myfile:
          data=myfile.read()
     try:
          d = jsonLoad(data)
          # Point.points[0] is preserved -- presumably a sentinel entry;
          # only the rest of the scene is replaced (TODO confirm).
          pbackup = Point.points[1:]
          sbackup = Stick.sticks[:]
          del Point.points[1:]
          del Stick.sticks[:]
          for p in d['points']:
               a = Point(Vector2(p['pos']['x'], p['pos']['y']), p['size'], p['pinned'])
               if p['isMotor']:
                    a.motorize(Vector2(p['spinPos']['x'], p['spinPos']['y']), p['spinSpeed'], p['currentSpin'])
                    a.currentSpin = p['currentSpin']
          for s in d['lines']:
               # Sticks reference their endpoints by index into Point.points.
               b = Stick(Point.points[s['p1']], Point.points[s['p2']], s['visible'])
     except Exception:
          # NOTE(review): broad catch -- any failure (bad JSON, missing keys,
          # bad indices) is reported as "Invalid JSON" and rolled back.
          messagebox.showinfo("Error", "Invalid JSON")
          if pbackup and sbackup:
               Point.points += pbackup
               Stick.sticks += sbackup
Beispiel #9
0
    def output(self, reportfile):
        """Write self._Data's UIAction records to *reportfile* as XML and
        mirror each record, tab-separated, onto the system clipboard.

        reportfile -- path of the XML report to create (overwritten).
        """
        items = ["Build", "Benchmark", "Scenario", "Test", "Index", "Key", "ResponseTime", "FPS"]

        r = Tk()
        r.withdraw()
        r.clipboard_clear()
        try:
            # Bug fix: with-statement closes the report file even when a
            # write fails (the original leaked the handle on error).
            with open(reportfile, "w") as fp:
                fp.write("<?xml version=\"1.0\" ?>\n")
                fp.write("<UIMacro>\n")
                for action in self._Data.findall("UIAction"):
                    s = "    <UIAction"
                    for item in items:
                        s += " %s=\"%s\"" % (item, action.get(item))
                    fp.write(s + "/>\n")

                    # Send the same record to the clipboard, tab separated.
                    line = "\t".join(action.get(item) for item in items)
                    r.clipboard_append(line + "\n")
                fp.write("</UIMacro>")
        finally:
            # Always release the hidden Tk root, even on write failure.
            r.destroy()
Beispiel #10
0
def main():
    """Let the user pick .bin files and parse each to text, logging and
    reporting per-file status codes via message boxes."""
    logging_level = logging.DEBUG
    logging.Formatter.converter = time.gmtime
    log_format = '%(asctime)-15s %(levelname)s:%(message)s'
    logging.basicConfig(format=log_format, datefmt='%Y/%m/%d %H:%M:%S UTC', level=logging_level,
                        handlers=[logging.FileHandler('testparsedasbin.log'), logging.StreamHandler()])
    logging.info('_____ Started _____')

    t_step = 5 #60
    start_dir = os.path.expanduser('~')
    status = []
    root = Tk()
    root.withdraw()  # this will hide the main window
    # Bug fix: tkinter expects filetypes as a sequence of (label, pattern)
    # pairs; the original passed a single reversed tuple of two strings.
    list_of_bin_files = filedialog.askopenfilenames(filetypes=[('Binary files', '*.bin')],
                                                    initialdir=start_dir, initialfile='')

    for path in list_of_bin_files:
        logging.info('processing ' + path)
        # pdb here is the project's parser module (aliased import),
        # not the stdlib debugger.
        status.append(pdb.parse_bin_files_to_text_files(in_filename=path, verbose_flag=True, dtm_format=True,
                                                        time_step=t_step))
        # Status convention per the original: >=256 error, 1..255 warning.
        code = status[-1]
        if code >= 256:
            messagebox.showerror('Error', 'Parsing of ' + path + ' ended with error code ' + str(code) + '!')
        elif code > 0:
            messagebox.showwarning('Warning', 'Parsing of ' + path + ' ended with warning code ' + str(code)
                                   + '.')

    logging.info('______ Ended ______')
Beispiel #11
0
def getFilenames(title, types=(), initialdir=None):
    """Show an open-files dialog and return the selected paths as a tuple.

    title      -- dialog window title.
    types      -- sequence of (label, pattern) filetype pairs (default: all).
    initialdir -- directory the dialog starts in.
    """
    root = Tk()
    root.withdraw()
    filenames = filedialog.askopenfilenames(title=title, filetypes=types,
                                            initialdir=initialdir)
    # Bug fix: splitlist must run BEFORE destroy() -- the original called
    # root.tk.splitlist on an already-destroyed interpreter.  Also changed
    # the mutable default [] to an immutable ().
    result = root.tk.splitlist(filenames)
    root.destroy()
    return result
Beispiel #12
0
def SelectDataFiles():
    """Select the files to compress into a JAM"""
    # Draw (then withdraw) the root Tk window
    logging.info("Drawing root Tk window")
    root = Tk()
    logging.info("Withdrawing root Tk window")
    root.withdraw()

    # Overwrite root display settings
    logging.info("Overwrite root settings to basically hide it")
    root.overrideredirect(True)
    root.geometry('0x0+0+0')

    # Show window again, lift it so it can receive the focus
    # Otherwise, it is behind the console window
    root.deiconify()
    root.lift()
    root.focus_force()

    # The directory containing the files to be compressed
    jam_files = filedialog.askdirectory(
        parent=root,
        title="Where are the extracted LEGO.JAM files located?"
    )

    if not jam_files:
        root.destroy()
        colors.text("\nCould not find a JAM archive to compress!",
                    color.FG_LIGHT_RED)
        main()
        # Bug fix: return after falling back to the main menu; the original
        # fell through, called root.destroy() a second time, and invoked
        # BuildJAM("") with no selection.
        return

    # Compress the JAM
    root.destroy()
    BuildJAM(jam_files)
Beispiel #13
0
    def enable(self, app=None):
        """Enable event loop integration with Tk.

        Parameters
        ----------
        app : toplevel :class:`Tkinter.Tk` widget, optional.
            Running toplevel widget to use.  If not given, we probe Tk for an
            existing one, and create a new one if none is found.

        Notes
        -----
        If you have already created a :class:`Tkinter.Tk` object, the only
        thing done by this method is to register with the
        :class:`InputHookManager`, since creating that object automatically
        sets ``PyOS_InputHook``.
        """
        if app is None:
            try:
                from tkinter import Tk  # Py 3
            except ImportError:
                from Tkinter import Tk  # Py 2
            # A withdrawn root is enough: creating Tk() installs
            # PyOS_InputHook as a side effect (see docstring).
            app = Tk()
            app.withdraw()
            self.manager.apps[GUI_TK] = app
            return app
        # NOTE(review): when *app* is passed in, nothing is registered and
        # None is returned -- confirm this matches caller expectations.
Beispiel #14
0
def installer():
    """Copy the program into ~/Documents/Averager, create a desktop
    shortcut, and show a confirmation dialog.

    Windows-only: raises OSError on any other platform, and exits the
    process (sys.exit) on success.
    """
    if not sys.platform.startswith('win'):
        raise OSError

    CSIDL_PERSONAL = 5  # My Documents
    SHGFP_TYPE_CURRENT = 0  # Get current, not default value
    # Ask the Windows shell for the user's Documents folder path.
    buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
    ctypes.windll.shell32.SHGetFolderPathW(None, CSIDL_PERSONAL, None, SHGFP_TYPE_CURRENT, buf)

    full_path = buf.value + r"\Averager"
    # Already running from the install location -- nothing to do.
    if full_path == os.getcwd():
        return
    try:
        shutil.copytree(os.getcwd(), full_path)
    except FileExistsError:
        # A previous (possibly partial) install exists; leave it as-is.
        pass

    # Build a desktop shortcut via the WScript.Shell COM object (winshell
    # and Dispatch come from third-party pywin32/winshell imports).
    desktop = winshell.desktop()
    path = os.path.join(desktop, "Averager.lnk")
    target = full_path + r"\Averager.exe"
    wDir = full_path
    icon = full_path + r"\favicon.ico"

    shell = Dispatch('WScript.Shell')
    shortcut = shell.CreateShortCut(path)
    shortcut.Targetpath = target
    shortcut.WorkingDirectory = wDir
    shortcut.IconLocation = icon
    shortcut.save()
    # Hidden Tk root so the confirmation box has a parent.
    root = Tk()
    root.withdraw()
    messagebox.showinfo("Congrats!", "You're all set up! You'll\nfind a shortcut on your desktop")
    sys.exit(0)
Beispiel #15
0
    def add_extra_content_to_data_table(self, data_table):
        """Collect data-table names from the user via dialogs and append
        them as an extra content row on *data_table*.

        The user is prompted repeatedly; pressing Cancel ends input.  The
        collected names are also recorded on self.source.
        """
        data_table_names = []
        root = Tk()
        root.withdraw()

        # Same dialog in a single loop (the original duplicated the
        # askstring call before and inside the loop); the prompt wording
        # changes after the first entry, as before.
        prompt = "Add datatable name for this template. Press cancel to end inputting"
        while True:
            data_table_name = simpledialog.askstring("Action", prompt)
            if data_table_name is None:
                break
            data_table_names.append(data_table_name)
            prompt = ("Add another datatable name for this template. "
                      "Press cancel to end inputting")

        root.destroy()
        content = ["", data_table.DATA_TABLE_KEY] + data_table_names
        data_table.content.append(content)
        self.source.data_table_names = data_table_names
def main() :
    """Code to read a file and assess its adherence to standards"""
    global fileDict

    fileDict = {}
    print("\nOpening a file—look for dialog box:")
    sys.stdout.flush()
    win = Tk() # root window for tkinter file dialog
    win.attributes('-topmost', True)
    win.withdraw() # hide root window
    win.lift() # move to front
    filename = filedialog.askopenfilename(parent=win) # get file name
    win.destroy() # get rid of root window
    print("Opening file %s, assumed to be a Python file\n" % filename)
    analyzeFile(filename)

    if DEBUG_LEVEL > 0 :
        # Improvement: getattr replaces eval("tokenize." + name) -- same
        # result without dynamic code execution.
        tokenParts = set(name for name in dir(tokenize) if name.isupper()
                 and type(getattr(tokenize, name)) == int) # tokenize constants
        tokenCodes = sorted([(getattr(tokenize, name), name) for
                 name in tokenParts]) # codes corresponding to tokenParts
        print("\nTable of token codes")
        for code, name in tokenCodes:
            print("%5d %s" % (code, name))

    showTermination()
    return
Beispiel #17
0
def savePreset(main, filename=None):
    """Write the current camera settings of *main* into a preset file.

    When *filename* is None the user is prompted for one; cancelling the
    prompt aborts without writing anything.  The file is written under
    main.presetDir and registered in main's presets menu.
    """
    if filename is None:
        prompt_root = Tk()
        prompt_root.withdraw()
        filename = simpledialog.askstring(title='Save preset',
                                          prompt='Save config file as...')
        prompt_root.destroy()

    if filename is None:
        return

    fov = 'Field of view'
    cfg = configparser.ConfigParser()
    cfg['Camera'] = {
        'Frame Start': main.frameStart,
        'Shape': main.shape,
        'Shape name': main.tree.p.param(fov).param('Shape').value(),
        'Horizontal readout rate': str(main.HRRatePar.value()),
        'Vertical shift speed': str(main.vertShiftSpeedPar.value()),
        'Clock voltage amplitude': str(main.vertShiftAmpPar.value()),
        'Frame Transfer Mode': str(main.FTMPar.value()),
        'Cropped sensor mode': str(main.cropParam.value()),
        'Set exposure time': str(main.expPar.value()),
        'Pre-amp gain': str(main.PreGainPar.value()),
        'EM gain': str(main.GainPar.value())}

    with open(os.path.join(main.presetDir, filename), 'w') as configfile:
        cfg.write(configfile)

    main.presetsMenu.addItem(filename)
def getFileInput():
	"""Ask the user for a word file and return the open read handle.

	Exits the program (SystemExit) when the dialog is cancelled.
	"""
	dialog_root = Tk()
	dialog_root.withdraw()
	handle = filedialog.askopenfile(title="Open Word File", mode="r", parent=dialog_root)
	if handle is None:
		raise SystemExit
	return handle
    def save():
        """This function will save the file.
        It will also save the day, too!

        You just might be safe with this one!"""
        from tkinter import Tk
        import tkinter.filedialog
        root = Tk()
        root.withdraw()
        # Legacy Tk FileDialog (not the native asksaveasfilename wrapper).
        fd = tkinter.filedialog.FileDialog(master=root,title='Project Celestia')
        savefile = fd.go()
        if savefile != None:
            import os.path, tkinter.dialog
            if os.path.exists(savefile):
                if os.path.isdir(savefile):
                    # NOTE(review): this rings the bell on the FileDialog
                    # *class* attribute, not the instance -- likely meant
                    # fd.master.bell(); confirm before relying on it.
                    tkinter.filedialog.FileDialog.master.bell()
                    return savefile
                # Confirm overwriting an existing regular file.
                d = tkinter.dialog.Dialog(master=None,title='Hold your horses!',
                                          text='Are you sure you want to rewrite this file?'
                                          '\nI mean, I have already seen the file before...\n'
                                          '\n-Twilight Sparkle',
                                          bitmap='questhead',default=0,strings=('Eeyup','Nah'))
                # Button 0 ('Eeyup') confirms; anything but 'Nah' (1) proceeds.
                if d.num != 1:
                    return savefile
        else:
            # FlashSentry: project error handler for a cancelled dialog.
            FlashSentry.save_error()
        root.destroy()
Beispiel #20
0
 def clip(password):
     """Copy *password* onto the system clipboard via a hidden Tk root."""
     root = Tk()
     root.withdraw()
     root.clipboard_clear()
     root.clipboard_append(password)
     print("\nPassword copied in CLEAR in your clipboard.\n")
def main():
	"""Read a comma/space-delimited grade file chosen by the user, build
	Student records, sort them, and print a formatted report."""
	root = Tk()
	root.withdraw()
	with filedialog.askopenfile(mode = "r", parent = root) as inputData:
		students = []
		for line in inputData:
			# Expected line shape: "first,last  s1  s2 ..." -- the scores are
			# split off the last-name field on double spaces (TODO confirm).
			firstName, lastName = line.split(',')
			lastName = lastName.strip()
			scores = []
			scores = lastName.split('  ')	
			lastName = scores.pop(0)
			# Drop empty tokens produced by runs of spaces.
			while '' in scores:
				scores.remove('')
			# Repair tokens that still contain a single space: strip a
			# leading space, and split fused pairs such as "100 95".
			for item in scores:
				if ' ' in item:
					if ' ' in item[:1]:
						newItem = item[1:]
						scores.insert(scores.index(item), newItem)
						scores.remove(item)
						item = newItem
					if "100" in item:
						first, second = item.split(' ')
						first = first.strip()
						second = second.strip()
						scores.insert(scores.index(item), first)
						scores.insert(scores.index(item) + 1, second)
						scores.remove(item)
					else:
						scores[scores.index(item)] = item.replace(' ', '')
			students.append(Student(firstName, lastName, scores))
		students = sortList(students)	
		longestNameLen = findLongestName(students)
		output(students, longestNameLen, os.path.basename(inputData.name))
Beispiel #22
0
    def before_write_instru_info(self, instru_info: InstruInfoComponent):
        """Prompt for NDAR instrument name, version, and respondent, and
        record whichever values the user supplies on both *instru_info*
        and this writer (cancelled prompts leave fields untouched)."""
        super().before_write_instru_info(instru_info)
        dialog_root = Tk()
        dialog_root.withdraw()

        name = simpledialog.askstring("Action", "Enter NDAR instrument name (without version and not case sensitive)")
        version = simpledialog.askstring("Action", "Enter NDAR instrument version")
        respondent = simpledialog.askstring("Action", "Enter the respondent for this instrument. (e.g, twin, cotwin)")

        if name is not None:
            # No input check right now
            lowered = name.lower()
            instru_info.instru_info.instru_name = lowered
            self.instru_info.instru_name = lowered

        if version is not None:
            # NDAR requires single-digit version numbers to carry a
            # leading '0' ("3" -> "03").
            padded = ("0" + version) if len(version) == 1 else version
            instru_info.instru_info.version = padded
            self.instru_info.version = padded

        if respondent is not None:
            instru_info.instru_info.respondent = respondent
            self.instru_info.respondent = respondent

        dialog_root.destroy()
Beispiel #23
0
def getFilename(title, types, initialdir=None):
    """Show an open-file dialog and return the chosen path string.

    title      -- dialog window title.
    types      -- sequence of (label, pattern) filetype pairs.
    initialdir -- directory the dialog starts in.
    """
    dialog_root = Tk()
    dialog_root.withdraw()
    chosen = filedialog.askopenfilename(title=title, filetypes=types,
                                        initialdir=initialdir)
    dialog_root.destroy()
    return chosen
Beispiel #24
0
def priorityOpen():
    """Prompt for a priority Excel file and return the selected path."""
    picker = Tk()
    picker.withdraw()
    picker.update()
    chosen = askopenfilename(initialdir = "",
                             filetypes = (("Excel Files", "*.xls"),("Excel Files","*.xlsx")),
                             title = "Choose a new Priority Excel File")
    picker.destroy()
    return chosen
Beispiel #25
0
def _and_print(x):
    """Place *x* on the system clipboard and echo it to stdout."""
    from tkinter import Tk

    clip_root = Tk()
    clip_root.withdraw()
    clip_root.clipboard_clear()
    clip_root.clipboard_append(x)
    print(x)
 def export_image(self):
     """Dump the current 3D view to 0000_export_main.png."""
     # GetObject() result is unused; kept because the call may have side
     # effects on the view -- NOTE(review): confirm, else remove.
     f = self.canvas.get_display().View.View().GetObject()
     # print f.Export("tetesdrf.svg", Graphic3d_EF_SVG)
     # NOTE(review): this hidden Tk root is created and destroyed without
     # ever being used -- likely leftover from a file-dialog version.
     root = Tk()
     root.withdraw()
     root.destroy()
     print(self.canvas.get_display().View.Dump("0000_export_main.png"))
     pass
Beispiel #27
0
def dirOpen():
    """Prompt for the RDY destination folder and return the chosen path."""
    picker = Tk()
    picker.withdraw()
    picker.update()
    chosen = askdirectory(initialdir = "", title = "Choose the RDY folder destination")
    picker.destroy()
    return chosen
Beispiel #28
0
def rdyOpen():
    """Prompt for an RDY file and return the selected path."""
    picker = Tk()
    picker.withdraw()
    picker.update()
    chosen = askopenfilename(initialdir = "",
                             filetypes = (("RDY Files", "*.rdy"),("All Files","*.RDY")),
                             title = "Choose a new RDY File")
    picker.destroy()
    return chosen
Beispiel #29
0
class AdminRun(object):
    """Invokes the RunAsAdmin helper utility"""

    def __init__(self):
        """Draw (then withdraw) root Tkinter window"""
        self.main = Tk()
        self.main.withdraw()
        # const.app_icon: project-wide window icon path.
        self.main.iconbitmap(const.app_icon)

    def launch(self, messages):
        """Relaunch PatchIt! with administrator rights

        messages -- sequence of strings; messages[0] is the headline shown
        in the confirmation prompt, any remaining items are joined and
        appended to it.  Returns False when no relaunch happens; raises
        SystemExit(0) after handing off to RunAsAdmin.exe.
        """

        # Display any message(s) properly
        if messages[1:] == []:
            end = ""
        else:
            end = "".join(messages[1:])

        __admin = askyesno("Relaunch PatchIt?",
                           '''{0}
Would you like to relaunch PatchIt! with Administrator rights? {1}'''.format(
                           messages[0], end))

        # User does not want to relaunch PatchIt!
        if not __admin:
            logging.warning("User does not want to relaunch with Admin rights")
            self.main.destroy()
            return False
        # If user wants to relaunch
        else:
            logging.info("User wants to relaunch with Admin rights")

            # This is the raw Python script. RunAsAdmin will not work
            if (const.exe_name.endswith("py") or
                    const.exe_name.endswith("pyw")):
                logging.warning('''This is a raw Python script ({0})
RunAsAdmin.exe cannot operate!'''.format(const.exe_name))

                showerror("Running Error!",
                          '''You are running a raw Python script ({0}).
RunAsAdmin will not work at all.'''.format(const.exe_name))
                self.main.destroy()
                return False

            # Launch RunAsAdmin to reload PatchIt!
            else:
                logging.info('''This is an exe ({0}).
Launching RunAsAdmin.exe'''.format(const.exe_name))

                subprocess.call(
                    [os.path.join(const.app_folder, "RunAsAdmin.exe"),
                        const.exe_name])

                # Now we close PatchIt!, and let RunAsAdmin take over
                # (that is, if this is an exe)
                self.main.destroy()
                logging.shutdown()
                # Clean exit so RunAsAdmin's relaunch is the only
                # surviving process.
                raise SystemExit(0)
Beispiel #30
0
def powerCalibration(th=None, initialdir=None):
    """Ask the user for a file-name prefix, analyse the beam, and return
    the resulting intensity calibration.

    th is accepted for interface compatibility but unused here.
    """
    prompt_root = Tk()
    prompt_root.withdraw()
    prefix = simpledialog.askstring(title='Saving',
                                    prompt='Save files with prefix...')
    prompt_root.destroy()

    sigma, folder = analyzeBeam(prefix, initialdir)
    return intensityCalibration(sigma, prefix, folder)
Beispiel #31
0
def plot_tdr_measurement(plot_settings):
    """Plot TDR impedance vs. distance for a non-biased and a biased S2P
    measurement and save one figure per port to a user-chosen directory.

    NOTE(review): *plot_settings* is never used in this body -- confirm
    whether it is consumed elsewhere or dead.
    """
    # Creates and readies a Tkinter object for GUI.
    root = Tk()
    root.withdraw()

    plt.ion()

    # Set TDR constants.
    z_0 = 50            # reference impedance, ohms
    c = 299792458       # speed of light, m/s
    vf = 0.695          # velocity factor -- presumably of the cable under
                        # test; TODO confirm
    vs = c * vf

    print("Select non-biased s2p file")

    # Prompts user to select non-biased TDR measurement file.
    # Updates Tkinter object.
    no_amp_file = askopenfilename()
    root.update()

    time.sleep(1)

    print("Select biased s2p file")

    # Prompts user to select biased TDR measurement file.
    # Updates Tkinter object.
    with_amp_file = askopenfilename()
    root.update()

    time.sleep(1)

    save_file_name = get_save_file_name()

    print("Please select save location for plots.\n")

    # Prompts user to select directory to save plot(s).
    save_directory = askdirectory()
    # Destroy Tkinter object.
    root.destroy()

    # rf: scikit-rf -- load the touchstone files as networks.
    no_amp_network = rf.Network(no_amp_file)
    with_amp_network = rf.Network(with_amp_file)

    # Time axes from each network's frequency sweep.
    no_amp_t = no_amp_network.frequency.t
    with_amp_t = with_amp_network.frequency.t

    # Convert round-trip time to one-way distance (divide by 2).
    no_amp_d = no_amp_t * vs / 2
    with_amp_d = with_amp_t * vs / 2

    # Real part of the time-domain reflection coefficient per port.
    no_amp_gamma1 = np.real(no_amp_network.s11.s_time[:, 0, 0])
    with_amp_gamma1 = np.real(with_amp_network.s11.s_time[:, 0, 0])
    no_amp_gamma2 = np.real(no_amp_network.s22.s_time[:, 0, 0])
    with_amp_gamma2 = np.real(with_amp_network.s22.s_time[:, 0, 0])

    # Impedance from reflection coefficient: Z = Z0 (1+g)/(1-g).
    no_amp_z_1 = z_0 * (1 + no_amp_gamma1) / (1 - no_amp_gamma1)
    with_amp_z_1 = z_0 * (1 + with_amp_gamma1) / (1 - with_amp_gamma1)
    no_amp_z_2 = z_0 * (1 + no_amp_gamma2) / (1 - no_amp_gamma2)
    with_amp_z_2 = z_0 * (1 + with_amp_gamma2) / (1 - with_amp_gamma2)

    plt.figure()
    plt.plot(no_amp_d, no_amp_z_1, label='No Amplifier Biased')
    plt.plot(with_amp_d, with_amp_z_1, label='With Amplifier Biased')
    plt.xlim([-0.25, 3.75 / 2])
    plt.xlabel('Distance [m]', fontsize=16)
    plt.ylabel('Impedance [Ohm]', fontsize=16)
    plt.title('RF Line 2, Port 1/RF In TDR', fontsize=16)
    plt.legend()

    plt.savefig(save_directory + "/" + save_file_name + "Port_1_RF_IN")

    plt.figure()
    plt.plot(no_amp_d, no_amp_z_2, label='No Amplifier Biased')
    plt.plot(with_amp_d, with_amp_z_2, label='With Amplifier Biased')
    plt.xlim([-0.25, 3.75 / 2])
    plt.xlabel('Distance [m]', fontsize=16)
    plt.ylabel('Impedance [Ohm]', fontsize=16)
    plt.title('RF Line 2, Port 2/RF Out TDR', fontsize=16)
    plt.legend()

    plt.savefig(save_directory + "/" + save_file_name + "Port_2_RF_OUT")
class BarcodeConverter:
    """Batch-converts barcode values inside CSV files.

    Reads a mapping table from the first .xlsx workbook in the working
    directory, then rewrites every .csv under <cd>/Files, replacing mapped
    values and writing the result as converted_<name>.CSV.
    """
    def __init__(self,*args,**kwargs):
        # Hidden Tk root: only needed so error_W() can show a Toplevel.
        self.root=Tk()
        self.root.withdraw()
        # Directory of this script: -19 strips the script file name from
        # the __file__ path -- NOTE(review): confirm the length matches.
        self.cd = os.path.realpath(__file__)[:-19]
        try:
            # Open the first .xlsx (sorted) as the mapping workbook.
            ksd=sorted([el for el in os.listdir() if el.endswith(".xlsx")])
            rn=self.cd+ksd[0]
            inputWorkbook=xlrd.open_workbook(rn, on_demand = True)
            bk=inputWorkbook.sheet_by_index(0)
            columnsdf=list()
            # First sheet row holds the column headers.
            for il in range(0,1):
                for ik in range(0,bk.ncols):
                    val=bk.cell(il, ik).value 
                    columnsdf.append(val)
            d1=pd.DataFrame(columns=columnsdf)
            # Remaining rows become the mapping DataFrame; string cells are
            # whitespace-stripped.
            for il in range(1,bk.nrows):
                for ik in range(0,bk.ncols):
                    if type(bk.cell(il, ik).value)==str: 
                        d1.loc[il-1,columnsdf[ik]]=bk.cell(il, ik).value.strip()
                    else:
                        d1.loc[il-1,columnsdf[ik]]=bk.cell(il, ik).value
            inputWorkbook.release_resources()

            os.chdir(self.cd+"Files\\")
            # skipf/skipll: header and footer line counts copied through
            # unmodified (taken from the table's first and last columns).
            skipf=int(d1.loc[0,columnsdf[0]])
            skipll=int(d1.loc[0,columnsdf[len(columnsdf)-1]])
            filechain=[fel for fel in os.listdir() if fel.lower().endswith(".csv")]
            for el in filechain:
                with open(el, errors="ignore") as fp:
                    Lines = fp.readlines()
                    Linesf=list()
                    # 1-based CSV column numbers that may hold mapped values.
                    subscol=d1[columnsdf[1]].unique().tolist()
                    for il,line in enumerate(Lines):
                        lf=line.split(",")
                        # l: fields with double quotes stripped for matching.
                        l=[bel.replace("\"","") for bel in lf]
                        if il<=skipf-1:
                            Linesf.append(line)
                        elif all([il>skipf-1,il<len(Lines)-skipll]):
                            # rt flags whether any column matched a mapping.
                            rt=0
                            for gdel in subscol:
                                if l[int(gdel)-1] in d1[d1[columnsdf[1]]==int(gdel)][columnsdf[2]].values.tolist():
                                    rt=1
                                    l[int(gdel)-1]="\""+d1[(d1[columnsdf[1]]==int(gdel))&(d1[columnsdf[2]]==l[int(gdel)-1])][columnsdf[3]].values[0]+"\""
                                    Linesf.append(self.record_strings(l,lf))
                                    break
                            if rt==0:
                                Linesf.append(line)
                        elif il>=len(Lines)-skipll:
                            Linesf.append(line)

                    with open("converted_"+el[:-4]+".CSV", mode="w", errors="ignore") as fw:
                        fw.writelines(Linesf)
        except:
            # NOTE(review): bare except hides the real failure; every error
            # is reported as a malformed input table via error_W().
            self.error_W()
        self.root.destroy()

    def record_strings(self,*args):
        """Re-join a split CSV line, re-quoting non-numeric fields.

        args[0]: fields with quotes stripped; args[1]: the raw fields.
        Returns the comma-joined line.
        """
        lg=[bel.replace("\"","") for bel in args[0]]
        for iel,bel in enumerate(lg):
            try:
                float(bel)
            except:
                # Non-numeric field: restore surrounding quotes, keeping a
                # trailing newline outside the quotes.
                if "\n" in bel:
                    bel="\""+bel[:-1]+"\""+bel[-1:]
                elif all([args[1][iel]=='',args[1][iel]!="\"\""]):
                    bel=args[0][iel]
                else:
                    bel="\""+bel+"\""
            lg[iel]=bel
        resl=",".join(lg)
        return resl

    def error_W(self):
        # Modal window (Spanish) describing a malformed input table.
        self.top = tk.Toplevel()
        self.top.tk.call('wm', 'iconphoto', self.top._w,tk.PhotoImage(file=self.cd+"ICON.png"))
        self.top.resizable(0,0)
        self.top.title('Error: Entrada Incorrecta')
        msg=tk.Message(self.top,text="La estructura de la tabla de entrada .xlsx no es la esperada por el programa, no hay correspondencia de las columnas especificadas en el .xlsx con las del archivo de entrada que debe ser .CSV, las última y primera columna del .xlsx no tienen todos los valores iguales, o algún otro elemento está incorrectamente definido.",width=500)
        msg.grid(padx=0,pady=0)
        self.top.mainloop()

    def error_importing(self):
        # Modal window (Spanish) asking the user to install dependencies.
        self.top = tk.Toplevel()
        self.top.tk.call('wm', 'iconphoto', self.top._w,tk.PhotoImage(file=self.cd+"ICON.png"))
        self.top.resizable(0,0)
        self.top.title('Error de importación')
        msg=tk.Message(self.top,text="Por favor, instale las librerías de python pandas, tkinter y xlrd para que el script pueda funcionar correctamente.",width=500)
        msg.grid(padx=0,pady=0)
        self.top.mainloop()
Beispiel #33
0
class TimesheetFormatter:
    """Turn a raw punch-clock timesheet CSV into a payroll-ready CSV.

    The driver (:meth:`format`) prompts the user for a timesheet CSV and an
    employee-info CSV, validates both, splits shifts that cross midnight
    into per-day entries, totals hours per employee per day, and writes the
    result to ``<timesheet>_output.csv``.
    """

    def __init__(self):
        # Hidden Tk root so only the dialogs / message boxes are visible.
        self.root = Tk()
        self.root.overrideredirect(1)
        self.root.withdraw()

    ###################################
    ##  Driver
    ###################################

    def format(self):
        """Run the full pipeline; returns silently if validation fails."""

        timesheetFile = self.getTimesheetFile()
        if not self.validateTimeSheet(timesheetFile):
            return
        else:
            timesheetData = self.loadTimeSheetInfo(timesheetFile)

        employeeFile = self.getEmployeeFile()
        if not self.validateEmployeeInfo(employeeFile):
            return
        else:
            employeeData = self.loadEmployeeInfo(employeeFile)

        self.separate(timesheetData)

        dayLog = self.createDayLog(timesheetData)

        # NOTE(review): assumes the timesheet path ends in a 4-character
        # extension such as ".csv" -- confirm for other extensions.
        self.writeFormattedOutput(dayLog, employeeData,
                                  timesheetFile[:-4] + "_output.csv")

    ###################################
    ##  Business Logic
    ###################################

    # splits two-day shifts into two separate timesheet entries in the original
    # timesheetData
    def separate(self, timesheetData):
        """Split every shift that crosses midnight into two entries, in place.

        Each log is ``[start_dt, start_time_dt, end_dt, end_time_dt, hours]``
        as produced by :meth:`loadTimeSheetInfo`.  A two-day shift becomes
        one entry ending at 23:59 and one starting at 00:00, with the hours
        recomputed for each side of midnight.
        """
        for employee in timesheetData:
            logs = timesheetData[employee]

            # BUGFIX: the original used ``for i in range(len(logs))`` with the
            # length captured up front; every split grows the list by one, so
            # later entries shifted past the fixed range and the final log
            # could be skipped.  A while-loop over the live length visits
            # every entry exactly once.
            i = 0
            while i < len(logs):
                log = logs[i]
                startDate = log[0]
                endDate = log[2]

                if startDate.day != endDate.day:
                    startTime = log[1]
                    endTime = log[3]

                    startDate = startDate.combine(startDate, startTime.time())
                    # endDate is still a date-only datetime, i.e. midnight.
                    midNight = endDate
                    endDate = endDate.combine(endDate, endTime.time())

                    new1 = [
                        startDate, startTime, startDate,
                        time(23, 59),
                        round((midNight - startDate).total_seconds() / 3600, 2)
                    ]

                    new2 = [
                        endDate,
                        time(0, 0), endDate, endTime,
                        round((endDate - midNight).total_seconds() / 3600, 2)
                    ]

                    logs.pop(i)
                    logs.insert(i, new2)
                    logs.insert(i, new1)
                    i += 2  # both replacements are single-day; skip past them
                else:
                    i += 1

    # returns a mapping of each employee's id to their hours worked for each day
    def createDayLog(self, timesheetData):
        """Return ``{employee_id: {date: total_hours}}`` from the log lists."""

        dayData = {}

        for employee in timesheetData:
            logs = timesheetData[employee]
            dayLog = {}

            for log in logs:
                day = log[0].date()

                if day not in dayLog:
                    dayLog[day] = 0

                dayLog[day] += log[4]

            dayData[employee] = dayLog

        return dayData

    ###################################
    ##  Writing / Ouput
    ###################################

    def writeFormattedOutput(self, dayLog, employeeData, outputFile):
        """Write one CSV row per (employee, day).

        Employees present in ``dayLog`` but absent from ``employeeData`` are
        collected and reported in a warning dialog after the file is written.
        """
        with open(outputFile, 'w+', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([
                "Employee ID", "State Number", "Job Code", "Pay Code", "Date",
                "Hours Logged"
            ])

            errors = []

            for employee in dayLog:
                if employee in employeeData:
                    for date in sorted(dayLog[employee]):
                        output = [
                            employee, employeeData[employee][0],
                            employeeData[employee][1],
                            employeeData[employee][2],
                            date.strftime("%m/%d/%Y"),
                            str(round(dayLog[employee][date], 2))
                        ]

                        writer.writerow(output)
                else:
                    errors.append(employee)

        if len(errors) == 0:
            messagebox.showinfo("Completed", "Done!")
        else:
            messagebox.showwarning("Completed",
                                "Partial Completion: The following employees have logged time " + \
                                "but are not in your employee information file." +
                                "\n\n" + str(errors))

    def printTimesheet(self, timesheetData):
        """Debug helper: print every log entry in a human-readable line."""

        for employee in timesheetData:
            logs = timesheetData[employee]

            for i in range(len(logs)):
                log = logs[i]

                output = employee + " " + log[0].strftime("%m/%d/%Y") + " " + log[1].strftime("%I:%M %p") + \
                         " " + log[2].strftime("%m/%d/%Y") + " " + log[3].strftime("%I:%M %p") + " " + str(log[4])

                print(output)

    ###################################
    ##  Loading
    ###################################

    def loadEmployeeInfo(self, employeeFile):
        """Return ``{employee_id: [state_number, job_code, pay_code]}``."""
        with open(employeeFile) as f:
            reader = csv.reader(f)
            employeeData = {}
            next(reader)  # skip the header row
            for row in reader:
                employeeData[row[0]] = row[1:]

        return employeeData

    def loadTimeSheetInfo(self, timesheetFile):
        """Return ``{employee_id: [[start_dt, start_time, end_dt, end_time, hours], ...]}``.

        Dates are parsed as mm/dd/yyyy and clock times as hh:mm am/pm
        (times carry the strptime default date of 1900-01-01).
        """
        with open(timesheetFile) as f:
            reader = csv.reader(f)
            timesheetData = {}
            next(reader)  # skip the header row
            for row in reader:

                if row[0] not in timesheetData:
                    timesheetData[row[0]] = []

                timesheetData[row[0]].append([
                    datetime.strptime(row[1], "%m/%d/%Y"),
                    datetime.strptime(row[2], "%I:%M %p"),
                    datetime.strptime(row[3], "%m/%d/%Y"),
                    datetime.strptime(row[4], "%I:%M %p"),
                    float(row[5])
                ])
        return timesheetData

    ###################################
    ##  VALIDATION
    ###################################

    def validateEmployeeInfo(self, employeeFile):
        """Check the employee CSV: labelled header, then 4-column numeric rows.

        Shows an error dialog and returns False on the first problem found.
        """
        with open(employeeFile) as f:
            reader = csv.reader(f)
            labels = next(reader)

            # A digit in the header suggests the file has no label row.
            for label in labels:
                for char in label:
                    if char.isdigit():
                        # BUGFIX: dialog was mis-titled "Error: Timesheet Info"
                        # even though this validates the employee file.
                        messagebox.showerror(
                            "Error: Employee Info",
                            "It does not appear that the employee ID file has proper labels!\n\n"
                            "The first row of the csv file should have the following labels on line 1:\n\n"
                            "Employee ID,State Number,Job Code,Pay Code")

                        return False

            for i, row in enumerate(reader):
                if not len(row) == 4 or not row[0].isdigit() or len(
                        row[1]) != 6:
                    messagebox.showerror(
                        "Error: Employee Info",
                        "Incorrect Employee Info on Line " + str(i + 2) +
                        " of " + employeeFile)
                    return False
        return True

    def validateTimeSheet(self, timesheetFile):
        """Check the timesheet CSV: labelled header, then well-formed rows.

        Rows with the wrong shape, a non-numeric ID, or non-numeric hours are
        silently skipped (original leniency); rows that pass those checks must
        have parseable dates/times or an error dialog is shown and False
        returned.
        """
        error = 0

        with open(timesheetFile) as f:
            reader = csv.reader(f)
            labels = next(reader)

            # A digit in the header suggests the file has no label row.
            for label in labels:
                for char in label:
                    if char.isdigit():
                        messagebox.showerror(
                            "Error: Timesheet Info",
                            "It does not appear that the timesheet has proper labels!\n\n"
                            "The first row of the csv file should have the following labels on line 1:\n\n"
                            "Employee Id,Clock-In Date,Clock-In Time,Clock-Out Date, Clock-In Time,Hours Worked"
                        )

                        return False

            for i, row in enumerate(reader):
                # BUGFIX: the original fed row[5] straight to literal_eval,
                # which raises on non-literal text (e.g. "abc") instead of
                # treating the row as malformed and skipping it.
                hours_ok = False
                if len(row) == 6:
                    try:
                        hours_ok = (row[5].isdigit()
                                    or isinstance(literal_eval(row[5]), float))
                    except (ValueError, SyntaxError):
                        hours_ok = False

                if not len(row) == 6 or not row[0].isdigit() or not hours_ok:
                    pass  # malformed row: skipped, matching original behavior
                else:
                    try:
                        # ``error`` records the 1-based column being parsed so
                        # the dialog can point at it; both dates are checked
                        # before either time.
                        error = 2
                        datetime.strptime(row[1], "%m/%d/%Y")
                        error = 4
                        datetime.strptime(row[3], "%m/%d/%Y")
                        error = 3
                        datetime.strptime(row[2], "%I:%M %p")
                        error = 5
                        datetime.strptime(row[4], "%I:%M %p")
                    except ValueError:

                        if (error == 2 or error == 4):
                            expected_format = "mm/dd/yyyy"
                        else:
                            expected_format = "hh:mm pm"

                        messagebox.showerror(
                            "Error: Timesheet Info",
                            "Incorrect Timesheet Info on Line " + str(i + 2) +
                            " Column " + str(error) + " of " + timesheetFile +
                            ".\n\n Correct format should be " + expected_format + ".")
                        return False
        return True

    ###################################
    ##  Getting the file paths
    ###################################

    def getTimesheetFile(self):
        """Prompt for the timesheet CSV path; exits the program if cancelled."""
        messagebox.showinfo(
            "Select Timesheet Information",
            "In the next prompt, select your TIMESHEET INFORMATION.")
        timesheetFile = filedialog.askopenfilename()
        if not timesheetFile:
            exit()

        return timesheetFile

    def getEmployeeFile(self):
        """Prompt for the employee-info CSV path; exits the program if cancelled."""
        messagebox.showinfo(
            "Select Employee Information",
            "In the next prompt, select your EMPLOYEE INFORMATION.")

        employeeFile = filedialog.askopenfilename()
        if not employeeFile:
            exit()

        return employeeFile
 def select_directory(self):
     """Prompt the user for a directory and store the choice on self.dir_to_find."""
     from tkinter import Tk, filedialog
     chooser_root = Tk()
     chooser_root.withdraw()
     self.dir_to_find = filedialog.askdirectory()
Beispiel #35
0
def get_folder():
    """Ask the user to pick a directory via an always-on-top dialog.

    Returns the selected path (empty string when cancelled).
    """
    picker = Tk()
    picker.withdraw()
    picker.wm_attributes('-topmost', 1)
    return askdirectory(parent=picker)
Beispiel #36
0
def main(argv):
    config = read_parser(argv, Inputs, InputsOpt_Defaults)

    if config['mode'] == 'mode1_depe':

        print('Select MASTER Features xls')
        root = Tk()
        root.withdraw()
        root.update()
        filepath = filedialog.askopenfilename()
        root.destroy()

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int(config['train'] * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features_Train = []
        for i in range(length_train):
            example = []
            for feature in config['feature_array']:
                example.append(newdict[feature][i])
            Features_Train.append(example)
        Features_Train = np.array(Features_Train)

        Features_Test = []
        for i in range(length_test):
            example = []
            for feature in config['feature_array']:
                example.append(newdict[feature][i + length_train])
            Features_Test.append(example)
        Features_Test = np.array(Features_Test)

        Features = []
        for i in range(length_data):
            example = []
            for feature in config['feature_array']:
                example.append(newdict[feature][i])
            Features.append(example)
        Features = np.array(Features)

        print(len(Features))
        print(len(Features_Test))
        print(len(Features_Train))

        # plt.plot(Features_Train)
        # plt.show()
        # sys.exit()

        scaler_model = StandardScaler()
        scaler_model.fit(Features_Train)
        Features_Train = scaler_model.transform(Features_Train)
        Features_Test = scaler_model.transform(Features_Test)

        scaler_model_full = StandardScaler()
        scaler_model_full.fit(Features)
        Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'],
                              activation=config['activation_fus'],
                              solver=config['solver_fus'],
                              alpha=config['alpha_fus'],
                              random_state=config['rs_fus'])

        nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        fused_train = nn_fus.predict(Features_Train)
        fused_test = nn_fus.predict(Features_Test)

        nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'],
                                   activation=config['activation_fus'],
                                   solver=config['solver_fus'],
                                   alpha=config['alpha_fus'],
                                   random_state=config['rs_fus'])
        nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        fused = nn_fus_full.predict(Features)

        nn_pro = MLPRegressor(hidden_layer_sizes=config['layers_pro'],
                              activation=config['activation_pro'],
                              solver=config['solver_pro'],
                              alpha=config['alpha_pro'],
                              random_state=config['rs_pro'])

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        a = input('enter to continue...')

        T_Inputs = []
        T_Outputs = []
        for k in range(n_ex):
            T_Inputs.append(fused_train[k:k + n_pre])
            T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

        nn_pro.fit(T_Inputs, T_Outputs)
        print(T_Inputs)

        fused_predict = []
        it_fused = list(fused_train)

        for k in range(length_test + m_post - 1):
            P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
            # print(P_Input)
            # sys.exit()
            P_Output = nn_pro.predict([P_Input])
            P_Output = P_Output[0]

            fused_predict.append(P_Output[-1])
            it_fused.append(P_Output[-1])

        fused_predict = np.array(fused_predict[:-(m_post - 1)])

        # plt.plot(fused_predict, 'r', fused_test, 'b')

        x_full = np.arange((len(fused)))
        x_train = np.arange((len(fused_train)))
        x_predict = np.linspace(len(fused_train),
                                len(fused),
                                num=len(fused_test),
                                endpoint=False)

        plt.plot(x_full, fused, 'b', x_predict, fused_predict, 'r', x_train,
                 fused_train, 'k')
        plt.show()

        error = 0
        for i in range(len(fused_predict)):
            error += (fused_predict[i] - fused[length_train + i])**2.0
        error_final = np.absolute(fused_predict[length_test - 1] -
                                  fused[length_data - 1])
        print('error= ', error)
        print('error_final= ', error_final)

    elif config['mode'] == 'mode2':

        if config['source_file'] == 'OFF':
            print('Select MASTER Features xls')
            root = Tk()
            root.withdraw()
            root.update()
            filepath = filedialog.askopenfilename()
            root.destroy()
        else:
            filepath = 'C:\\Felix\\29_THESIS\\Analysis\\LAST_MASTER_AE_Features.xlsx'

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int(config['train'] * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features_Train = []
        for i in range(length_train):
            example = newdict[config['feature']][i]
            Features_Train.append(example)
        Features_Train = np.array(Features_Train)

        Features_Test = []
        for i in range(length_test):
            example = newdict[config['feature']][i + length_train]
            Features_Test.append(example)
        Features_Test = np.array(Features_Test)

        Features = []
        for i in range(length_data):
            example = newdict[config['feature']][i]
            Features.append(example)
        Features = np.array(Features)

        print(len(Features))
        print(len(Features_Test))
        print(len(Features_Train))

        # scaler_model = StandardScaler()
        # scaler_model.fit(Features_Train)
        # Features_Train = scaler_model.transform(Features_Train)
        # Features_Test = scaler_model.transform(Features_Test)

        # scaler_model_full = StandardScaler()
        # scaler_model_full.fit(Features)
        # Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        # nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])

        # nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        # fused_train = nn_fus.predict(Features_Train)
        # fused_test = nn_fus.predict(Features_Test)

        # nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])
        # nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        # fused = nn_fus_full.predict(Features)

        fused_train = np.ravel(Features_Train)
        fused_test = np.ravel(Features_Test)
        fused = np.ravel(Features)

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        # a = input('enter to continue...')

        if config['layers_pro'][0] == 0:
            print('Auto config of layers')
            List_Layers = list(
                np.arange(m_post, n_pre, int((n_pre - m_post) * 0.1)))
            # List_Layers = [50, 100]
        else:
            print('Auto config of layers IS not optional')
            sys.exit()

        ERROR = []
        ERROR_FINAL = []
        # print(List_Layers)
        # print(int((n_pre-m_post)*0.1))
        # print(np.arange(n_pre, m_post, int((n_pre-m_post)*0.1)))
        # sys.exit()
        List_RSs = [1]

        for layers in List_Layers:
            rs_error = 0
            rs_error_final = 0
            for rs_pro in List_RSs:
                nn_pro = MLPRegressor(hidden_layer_sizes=layers,
                                      activation=config['activation_pro'],
                                      solver=config['solver_pro'],
                                      alpha=config['alpha_pro'],
                                      random_state=rs_pro)

                T_Inputs = []
                T_Outputs = []
                for k in range(n_ex):
                    T_Inputs.append(fused_train[k:k + n_pre])
                    T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

                nn_pro.fit(T_Inputs, T_Outputs)
                print(T_Inputs)

                fused_predict = []
                it_fused = list(fused_train)

                for k in range(length_test + m_post - 1):
                    P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
                    # print(P_Input)
                    # sys.exit()
                    P_Output = nn_pro.predict([P_Input])
                    P_Output = P_Output[0]

                    fused_predict.append(P_Output[-1])
                    it_fused.append(P_Output[-1])

                fused_predict = np.array(fused_predict[:-(m_post - 1)])

                # plt.plot(fused_predict, 'r', fused_test, 'b')

                x_full = np.arange((len(fused)))
                x_train = np.arange((len(fused_train)))
                x_predict = np.linspace(len(fused_train),
                                        len(fused),
                                        num=len(fused_test),
                                        endpoint=False)

                # plt.plot(x_full, fused, 'b', x_predict, fused_predict, 'r', x_train, fused_train, 'k')
                # plt.show()

                error = 0
                for i in range(len(fused_predict)):
                    error += (fused_predict[i] - fused[length_train + i])**2.0
                error_final = np.absolute(fused_predict[length_test - 1] -
                                          fused[length_data - 1])
                print('error= ', error)
                print('error_final= ', error_final)

                rs_error += error
                rs_error_final += error_final

            ERROR.append(rs_error / len(List_RSs))
            ERROR_FINAL.append(rs_error_final / len(List_RSs))

        mydict = {'Error_Final': ERROR_FINAL, 'Error': ERROR}
        writer = pd.ExcelWriter('regre_' + config['name'] + '.xlsx')
        DataFr = pd.DataFrame(data=mydict, index=List_Layers)
        DataFr.to_excel(writer, sheet_name='Result')
        writer.close()

        mydict = {
            'alpha': config['alpha_pro'],
            'solver': config['solver_pro'],
            'activation': config['activation_pro'],
            'rs': config['rs_pro'],
            'n_pre': config['n_pre'],
            'm_post': config['m_post'],
            'n_mov_avg': config['n_mov_avg'],
            'train': config['train'],
            'feature': config['feature']
        }
        writer = pd.ExcelWriter('config_' + config['name'] + '.xlsx')
        DataFr = pd.DataFrame(data=mydict, index=['value'])
        DataFr.to_excel(writer, sheet_name='Result')
        writer.close()

    elif config['mode'] == 'mode2b':

        if config['source_file'] == 'OFF':
            print('Select MASTER Features xls')
            root = Tk()
            root.withdraw()
            root.update()
            filepath = filedialog.askopenfilename()
            root.destroy()
        else:
            filepath = 'C:\\Felix\\29_THESIS\\Analysis\\LAST_MASTER_AE_Features.xlsx'

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int(config['train'] * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Features_Train = []
        for i in range(length_train):
            example = newdict[config['feature']][i]
            Features_Train.append(example)
        Features_Train = np.array(Features_Train)

        Features_Test = []
        for i in range(length_test):
            example = newdict[config['feature']][i + length_train]
            Features_Test.append(example)
        Features_Test = np.array(Features_Test)

        Features = []
        for i in range(length_data):
            example = newdict[config['feature']][i]
            Features.append(example)
        Features = np.array(Features)

        print(len(Features))
        print(len(Features_Test))
        print(len(Features_Train))

        # scaler_model = StandardScaler()
        # scaler_model.fit(Features_Train)
        # Features_Train = scaler_model.transform(Features_Train)
        # Features_Test = scaler_model.transform(Features_Test)

        # scaler_model_full = StandardScaler()
        # scaler_model_full.fit(Features)
        # Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        # nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])

        # nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        # fused_train = nn_fus.predict(Features_Train)
        # fused_test = nn_fus.predict(Features_Test)

        # nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])
        # nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        # fused = nn_fus_full.predict(Features)

        fused_train = np.ravel(Features_Train)
        fused_test = np.ravel(Features_Test)
        fused = np.ravel(Features)

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        # a = input('enter to continue...')

        # if config['layers_pro'][0] == 0:
        # print('Auto config of layers')
        # List_Layers = list(np.arange(m_post, n_pre, int((n_pre-m_post)*0.1)))
        # # List_Layers = [50, 100]
        # else:
        # print('Auto config of layers IS not optional')
        # sys.exit()

        ERROR = []
        ERROR_FINAL = []
        # print(List_Layers)
        # print(int((n_pre-m_post)*0.1))
        # print(np.arange(n_pre, m_post, int((n_pre-m_post)*0.1)))
        # sys.exit()
        # List_RSs = [1]

        rs_error = 0
        rs_error_final = 0
        nn_pro = MLPRegressor(hidden_layer_sizes=config['layers_pro'],
                              activation=config['activation_pro'],
                              solver=config['solver_pro'],
                              alpha=config['alpha_pro'],
                              random_state=config['rs_pro'])

        T_Inputs = []
        T_Outputs = []
        for k in range(n_ex):
            T_Inputs.append(fused_train[k:k + n_pre])
            T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

        nn_pro.fit(T_Inputs, T_Outputs)
        print(T_Inputs)

        fused_predict = []
        it_fused = list(fused_train)

        for k in range(length_test + m_post - 1):
            P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
            # print(P_Input)
            # sys.exit()
            P_Output = nn_pro.predict([P_Input])
            P_Output = P_Output[0]

            fused_predict.append(P_Output[-1])
            it_fused.append(P_Output[-1])

        fused_predict = np.array(fused_predict[:-(m_post - 1)])

        # plt.plot(fused_predict, 'r', fused_test, 'b')

        x_full = np.arange((len(fused)))
        x_train = np.arange((len(fused_train)))
        x_predict = np.linspace(len(fused_train),
                                len(fused),
                                num=len(fused_test),
                                endpoint=False)

        error = 0
        for i in range(len(fused_predict)):
            error += (fused_predict[i] - fused[length_train + i])**2.0
        error_final = np.absolute(fused_predict[length_test - 1] -
                                  fused[length_data - 1])
        print('error= ', error)
        print('error_final= ', error_final)

        plt.plot(x_full, fused, 'b', x_predict, fused_predict, 'r', x_train,
                 fused_train, 'k')
        plt.show()

        # rs_error += error
        # rs_error_final += error_final

        # ERROR.append(rs_error/len(List_RSs))
        # ERROR_FINAL.append(rs_error_final/len(List_RSs))

        # mydict = {'Error_Final':ERROR_FINAL, 'Error':ERROR}
        # writer = pd.ExcelWriter('regre_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=List_Layers)
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

        # mydict = {'alpha':config['alpha_pro'], 'solver':config['solver_pro'], 'activation':config['activation_pro'], 'rs':config['rs_pro'], 'n_pre':config['n_pre'], 'm_post':config['m_post'], 'n_mov_avg':config['n_mov_avg'], 'train':config['train'], 'feature':config['feature']}
        # writer = pd.ExcelWriter('config_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=['value'])
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

    elif config[
            'mode'] == 'mode2c':  #para run automatico, con train, valid... el test no se usa

        if config['source_file'] == 'OFF':
            print('Select MASTER Features xls')
            root = Tk()
            root.withdraw()
            root.update()
            filepath = filedialog.askopenfilename()
            root.destroy()
        else:
            filepath = 'C:\\Felix\\29_THESIS\\Analysis\\LAST_MASTER_AE_Features.xlsx'

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int(config['train'] * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Full_Feature = newdict[config['feature']]
        # Full_Feature = np.arange(100)
        # TV_Feature = Full_Feature[0:int(config['valid']*len(Full_Feature))]

        Features = np.array(Full_Feature)
        Features_Train = np.array(Full_Feature[0:int(config['train'] *
                                                     len(Full_Feature))])
        Features_Valid = np.array(
            Full_Feature[int(config['train'] *
                             len(Full_Feature)):int(config['train'] *
                                                    len(Full_Feature)) +
                         int(config['valid'] * len(Full_Feature))])

        Features_Test = np.array(
            Full_Feature[int(config['train'] * len(Full_Feature)) +
                         int(config['valid'] * len(Full_Feature)):])

        # Features_Train = []
        # for i in range(length_train):
        # example = TV_Feature[i]
        # Features_Train.append(example)
        # Features_Train = np.array(Features_Train)

        # Features_Test = []
        # for i in range(length_test):
        # example = TV_Feature[i+length_train]
        # Features_Test.append(example)
        # Features_Test = np.array(Features_Test)

        # Features = []
        # for i in range(length_data):
        # example = TV_Feature[i]
        # Features.append(example)
        # Features = np.array(Features)

        # print(len(Features))
        # print(len(Features_Test))
        # print(len(Features_Train))

        # scaler_model = StandardScaler()
        # scaler_model.fit(Features_Train)
        # Features_Train = scaler_model.transform(Features_Train)
        # Features_Test = scaler_model.transform(Features_Test)

        # scaler_model_full = StandardScaler()
        # scaler_model_full.fit(Features)
        # Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        # nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])

        # nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        # fused_train = nn_fus.predict(Features_Train)
        # fused_test = nn_fus.predict(Features_Test)

        # nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])
        # nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        # fused = nn_fus_full.predict(Features)

        fused_train = np.ravel(Features_Train)
        fused_valid = np.ravel(Features_Valid)

        fused_test = np.ravel(Features_Test)
        fused = np.ravel(Features)
        # print(fused_train)
        # print(fused_valid)
        # print(fused_test)
        # sys.exit()

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        # a = input('enter to continue...')

        if config['layers_pro'][0] == 0:
            print('Auto config of layers')
            List_Layers = list(
                np.arange(m_post, n_pre, int((n_pre - m_post) * 0.1)))
            # List_Layers = [50, 100]
        else:
            print('Auto config of layers IS not optional')
            sys.exit()

        ERROR = []
        ERROR_FINAL = []
        # print(List_Layers)
        # print(int((n_pre-m_post)*0.1))
        # print(np.arange(n_pre, m_post, int((n_pre-m_post)*0.1)))
        # sys.exit()
        List_RSs = [1, 7, 12]

        for layers in List_Layers:
            rs_error = 0
            rs_error_final = 0
            for rs_pro in List_RSs:
                nn_pro = MLPRegressor(hidden_layer_sizes=layers,
                                      activation=config['activation_pro'],
                                      solver=config['solver_pro'],
                                      alpha=config['alpha_pro'],
                                      random_state=rs_pro,
                                      max_iter=100000)

                T_Inputs = []
                T_Outputs = []
                for k in range(n_ex):
                    T_Inputs.append(fused_train[k:k + n_pre])
                    T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

                nn_pro.fit(T_Inputs, T_Outputs)
                print(T_Inputs)

                fused_predict = []
                it_fused = list(fused_train)

                for k in range(len(fused_valid) + m_post - 1):
                    P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
                    # print(P_Input)
                    # sys.exit()
                    P_Output = nn_pro.predict([P_Input])
                    P_Output = P_Output[0]

                    fused_predict.append(P_Output[-1])
                    it_fused.append(P_Output[-1])

                fused_predict = np.array(fused_predict[:-(m_post - 1)])

                # plt.plot(fused_predict, 'r', fused_test, 'b')

                # x_full = np.arange((len(fused)))
                # x_train = np.arange((len(fused_train)))
                # x_predict = np.linspace(len(fused_train), len(fused), num=len(fused_test), endpoint=False)

                # plt.plot(x_full, fused, 'b', x_predict, fused_predict, 'r', x_train, fused_train, 'k')
                # plt.show()

                error = 0
                for i in range(len(fused_predict)):
                    error += (fused_predict[i] - fused_valid[i])**2.0
                # print(fused_predict)
                # print(fused_valid)
                error_final = np.absolute(fused_predict[-1] - fused_valid[-1])
                print('error= ', error)
                print('error_final= ', error_final)

                rs_error += error
                rs_error_final += error_final

            ERROR.append(rs_error / len(List_RSs))
            ERROR_FINAL.append(rs_error_final / len(List_RSs))

        mydict = {'Error_Final': ERROR_FINAL, 'Error': ERROR}
        writer = pd.ExcelWriter('train_' + str(config['train']) + '_regre_' +
                                config['name'] + '.xlsx')
        DataFr = pd.DataFrame(data=mydict, index=List_Layers)
        DataFr.to_excel(writer, sheet_name='Result')
        writer.close()

        mydict = {
            'alpha': config['alpha_pro'],
            'solver': config['solver_pro'],
            'activation': config['activation_pro'],
            'rs': config['rs_pro'],
            'n_pre': config['n_pre'],
            'm_post': config['m_post'],
            'n_mov_avg': config['n_mov_avg'],
            'train': config['train'],
            'feature': config['feature'],
            'valid': config['valid']
        }
        writer = pd.ExcelWriter('train_' + str(config['train']) + '_config_' +
                                config['name'] + '.xlsx')
        DataFr = pd.DataFrame(data=mydict, index=['value'])
        DataFr.to_excel(writer, sheet_name='Result')
        writer.close()

    elif config['mode'] == 'mode2d':  #para run manual, con train, valid y test

        if config['source_file'] == 'OFF':
            print('Select MASTER Features xls')
            root = Tk()
            root.withdraw()
            root.update()
            filepath = filedialog.askopenfilename()
            root.destroy()
        else:
            filepath = 'C:\\Felix\\29_THESIS\\Analysis\\LAST_MASTER_AE_Features.xlsx'

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int(config['train'] * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Full_Feature = newdict[config['feature']]
        # Full_Feature = np.arange(100)
        # TV_Feature = Full_Feature[0:int(config['valid']*len(Full_Feature))]

        Features = np.array(Full_Feature)
        Features_Train = np.array(Full_Feature[0:int(config['train'] *
                                                     len(Full_Feature))])
        Features_Valid = np.array(
            Full_Feature[int(config['train'] *
                             len(Full_Feature)):int(config['train'] *
                                                    len(Full_Feature)) +
                         int(config['valid'] * len(Full_Feature))])

        Features_Test = np.array(
            Full_Feature[int(config['train'] * len(Full_Feature)) +
                         int(config['valid'] * len(Full_Feature)):])

        # Features_Train = []
        # for i in range(length_train):
        # example = TV_Feature[i]
        # Features_Train.append(example)
        # Features_Train = np.array(Features_Train)

        # Features_Test = []
        # for i in range(length_test):
        # example = TV_Feature[i+length_train]
        # Features_Test.append(example)
        # Features_Test = np.array(Features_Test)

        # Features = []
        # for i in range(length_data):
        # example = TV_Feature[i]
        # Features.append(example)
        # Features = np.array(Features)

        # print(len(Features))
        # print(len(Features_Test))
        # print(len(Features_Train))

        # scaler_model = StandardScaler()
        # scaler_model.fit(Features_Train)
        # Features_Train = scaler_model.transform(Features_Train)
        # Features_Test = scaler_model.transform(Features_Test)

        # scaler_model_full = StandardScaler()
        # scaler_model_full.fit(Features)
        # Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        # nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])

        # nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        # fused_train = nn_fus.predict(Features_Train)
        # fused_test = nn_fus.predict(Features_Test)

        # nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])
        # nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        # fused = nn_fus_full.predict(Features)

        fused_train = np.ravel(Features_Train)
        fused_valid = np.ravel(Features_Valid)

        fused_test = np.ravel(Features_Test)
        fused = np.ravel(Features)
        # print(fused_train)
        # print(fused_valid)
        # print(fused_test)
        # sys.exit()

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        # a = input('enter to continue...')

        # if config['layers_pro'][0] == 0:
        # print('Auto config of layers')
        # List_Layers = list(np.arange(m_post, n_pre, int((n_pre-m_post)*0.1)))
        # # List_Layers = [50, 100]
        # else:
        # print('Auto config of layers IS not optional')
        # sys.exit()

        # print(List_Layers)
        # print(int((n_pre-m_post)*0.1))
        # print(np.arange(n_pre, m_post, int((n_pre-m_post)*0.1)))
        # sys.exit()

        nn_pro = MLPRegressor(hidden_layer_sizes=config['layers_pro'],
                              activation=config['activation_pro'],
                              solver=config['solver_pro'],
                              alpha=config['alpha_pro'],
                              random_state=config['rs_pro'],
                              max_iter=100000)

        T_Inputs = []
        T_Outputs = []
        for k in range(n_ex):
            T_Inputs.append(fused_train[k:k + n_pre])
            T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

        nn_pro.fit(T_Inputs, T_Outputs)
        print(T_Inputs)

        fused_predict_valid = []
        it_fused = list(fused_train)

        for k in range(len(fused_valid) + m_post - 1):
            P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
            # print(P_Input)
            # sys.exit()
            P_Output = nn_pro.predict([P_Input])
            P_Output = P_Output[0]

            fused_predict_valid.append(P_Output[-1])
            it_fused.append(P_Output[-1])

        fused_predict_valid = np.array(fused_predict_valid[:-(m_post - 1)])

        # plt.plot(fused_predict, 'r', fused_test, 'b')

        # x_full = np.arange((len(fused)))
        # x_train = np.arange((len(fused_train)))
        # x_predict = np.linspace(len(fused_train), len(fused), num=len(fused_test), endpoint=False)

        # plt.plot(x_full, fused, 'b', x_predict, fused_predict, 'r', x_train, fused_train, 'k')
        # plt.show()

        error = 0
        for i in range(len(fused_predict_valid)):
            error += (fused_predict_valid[i] - fused_valid[i])**2.0
        # print(fused_predict)
        # print(fused_valid)
        error_final = np.absolute(fused_predict_valid[-1] - fused_valid[-1])
        print('error validacion= ', error)
        print('error_final validacion= ', error_final)

        fused_predict_test = []
        it_fused = list(fused_train) + list(fused_valid)

        for k in range(len(fused_test) + m_post - 1):
            P_Input = it_fused[len(fused_valid) + n_ex + k +
                               1:len(fused_valid) + n_ex + n_pre + k + 1]
            # print(P_Input)
            # sys.exit()
            P_Output = nn_pro.predict([P_Input])
            P_Output = P_Output[0]

            fused_predict_test.append(P_Output[-1])
            it_fused.append(P_Output[-1])

        fused_predict_test = np.array(fused_predict_test[:-(m_post - 1)])

        error = 0
        for i in range(len(fused_predict_test)):
            error += (fused_predict_test[i] - fused_test[i])**2.0
        # print(fused_predict)
        # print(fused_valid)
        error_final = np.absolute(fused_predict_test[-1] - fused_test[-1])
        print('error test= ', error)
        print('error_final test= ', error_final)

        x_full = np.arange((len(fused)))
        x_train = np.arange((len(fused_train)))
        x_valid = np.linspace(len(fused_train),
                              len(fused_train) + len(fused_valid),
                              num=len(fused_valid),
                              endpoint=False)
        x_test = np.linspace(len(fused_train) + len(fused_valid),
                             len(fused_train) + len(fused_valid) +
                             len(fused_test),
                             num=len(fused_test),
                             endpoint=False)

        plt.plot(x_full, fused, 'b', x_train, fused_train, 'k', x_valid,
                 fused_predict_valid, 'r', x_test, fused_predict_test, 'm')
        plt.show()

        # mydict = {'Error_Final':ERROR_FINAL, 'Error':ERROR}
        # writer = pd.ExcelWriter('train_' +  str(config['train']) + '_regre_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=List_Layers)
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

        # mydict = {'alpha':config['alpha_pro'], 'solver':config['solver_pro'], 'activation':config['activation_pro'], 'rs':config['rs_pro'], 'n_pre':config['n_pre'], 'm_post':config['m_post'], 'n_mov_avg':config['n_mov_avg'], 'train':config['train'], 'feature':config['feature'], 'valid':config['valid']}
        # writer = pd.ExcelWriter('train_' + str(config['train']) + '_config_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=['value'])
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

    elif config[
            'mode'] == 'mode2e':  #para run manual, con train y test-- el valid es para entrenar

        if config['source_file'] == 'OFF':
            print('Select MASTER Features xls')
            root = Tk()
            root.withdraw()
            root.update()
            filepath = filedialog.askopenfilename()
            root.destroy()
        else:
            filepath = 'C:\\Felix\\29_THESIS\\Analysis\\LAST_MASTER_AE_Features.xlsx'

        mydict = pd.read_excel(filepath)
        rownames = list(mydict.index.values)
        length_data = len(rownames)
        length_train = int((config['train'] + config['valid']) * len(rownames))
        length_test = length_data - length_train

        mydict = mydict.to_dict(orient='list')
        newdict = {}
        for key, values in mydict.items():
            newdict[key] = movil_avg(mydict[key], config['n_mov_avg'])

        Full_Feature = newdict[config['feature']]
        # Full_Feature = np.arange(100)
        # TV_Feature = Full_Feature[0:int(config['valid']*len(Full_Feature))]

        Features = np.array(Full_Feature)
        Features_Train = np.array(
            Full_Feature[0:int((config['train'] + config['valid']) *
                               len(Full_Feature))])

        Features_Test = np.array(
            Full_Feature[int(config['train'] * len(Full_Feature)) +
                         int(config['valid'] * len(Full_Feature)):])

        # Features_Train = []
        # for i in range(length_train):
        # example = TV_Feature[i]
        # Features_Train.append(example)
        # Features_Train = np.array(Features_Train)

        # Features_Test = []
        # for i in range(length_test):
        # example = TV_Feature[i+length_train]
        # Features_Test.append(example)
        # Features_Test = np.array(Features_Test)

        # Features = []
        # for i in range(length_data):
        # example = TV_Feature[i]
        # Features.append(example)
        # Features = np.array(Features)

        # print(len(Features))
        # print(len(Features_Test))
        # print(len(Features_Train))

        # scaler_model = StandardScaler()
        # scaler_model.fit(Features_Train)
        # Features_Train = scaler_model.transform(Features_Train)
        # Features_Test = scaler_model.transform(Features_Test)

        # scaler_model_full = StandardScaler()
        # scaler_model_full.fit(Features)
        # Features = scaler_model_full.transform(Features)

        # plt.plot(Features)
        # plt.show()

        # nn_fus = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])

        # nn_fus.fit(X=Features_Train, y=np.linspace(0, 1, length_train))
        # fused_train = nn_fus.predict(Features_Train)
        # fused_test = nn_fus.predict(Features_Test)

        # nn_fus_full = MLPRegressor(hidden_layer_sizes=config['layers_fus'], activation=config['activation_fus'], solver=config['solver_fus'], alpha=config['alpha_fus'], random_state=config['rs_fus'])
        # nn_fus_full.fit(X=Features, y=np.linspace(0, 1, length_data))
        # fused = nn_fus_full.predict(Features)

        fused_train = np.ravel(Features_Train)
        print(fused_train)
        fused_test = np.ravel(Features_Test)
        fused = np.ravel(Features)

        # fused = np.arange(100)
        # fused_train = np.arange(60)
        # fused_test = np.arange(60, 100, 1)

        # length_data = 100
        # length_train = 60
        # length_test = 40

        print(fused_train)
        # print(fused_test)
        # sys.exit()

        n_pre = int(config['n_pre'] * length_train)
        m_post = int(config['m_post'] * length_train)
        n_ex = length_train + 1 - n_pre - m_post

        print('+++++++++++++Info: Input points n = ', n_pre)
        print('+++++++++++++Info: Output points m = ', m_post)
        print('+++++++++++++Info: Training examples = ', n_ex)
        # a = input('enter to continue...')

        # if config['layers_pro'][0] == 0:
        # print('Auto config of layers')
        # List_Layers = list(np.arange(m_post, n_pre, int((n_pre-m_post)*0.1)))
        # # List_Layers = [50, 100]
        # else:
        # print('Auto config of layers IS not optional')
        # sys.exit()

        # print(List_Layers)
        # print(int((n_pre-m_post)*0.1))
        # print(np.arange(n_pre, m_post, int((n_pre-m_post)*0.1)))
        # sys.exit()

        nn_pro = MLPRegressor(hidden_layer_sizes=config['layers_pro'],
                              activation=config['activation_pro'],
                              solver=config['solver_pro'],
                              alpha=config['alpha_pro'],
                              random_state=config['rs_pro'],
                              max_iter=100000)

        T_Inputs = []
        T_Outputs = []
        for k in range(n_ex):
            T_Inputs.append(fused_train[k:k + n_pre])
            T_Outputs.append(fused_train[k + n_pre:k + n_pre + m_post])

        nn_pro.fit(T_Inputs, T_Outputs)

        fused_predict_test = []
        it_fused = list(fused_train)

        for k in range(len(fused_test) + m_post - 1):
            P_Input = it_fused[n_ex + k + 1:n_ex + n_pre + k + 1]
            # print(P_Input)
            # sys.exit()
            # print(P_Input)
            P_Output = nn_pro.predict([P_Input])
            P_Output = P_Output[0]

            fused_predict_test.append(P_Output[-1])
            it_fused.append(P_Output[-1])

        fused_predict_test = np.array(fused_predict_test[:-(m_post - 1)])

        error = 0
        for i in range(len(fused_predict_test)):
            error += (fused_predict_test[i] - fused_test[i])**2.0
        # print(fused_predict)
        # print(fused_valid)
        error_final = np.absolute(fused_predict_test[-1] - fused_test[-1])
        print('error test= ', error)
        print('error_final test= ', error_final)

        x_full = np.arange((len(fused)))
        x_train = np.arange((len(fused_train)))
        x_predict = np.linspace(len(fused_train),
                                len(fused),
                                num=len(fused_test),
                                endpoint=False)

        plt.plot(x_full, fused, 'b', x_predict, fused_predict_test, 'r',
                 x_train, fused_train, 'k')
        plt.show()

        # mydict = {'Error_Final':ERROR_FINAL, 'Error':ERROR}
        # writer = pd.ExcelWriter('train_' +  str(config['train']) + '_regre_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=List_Layers)
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

        # mydict = {'alpha':config['alpha_pro'], 'solver':config['solver_pro'], 'activation':config['activation_pro'], 'rs':config['rs_pro'], 'n_pre':config['n_pre'], 'm_post':config['m_post'], 'n_mov_avg':config['n_mov_avg'], 'train':config['train'], 'feature':config['feature'], 'valid':config['valid']}
        # writer = pd.ExcelWriter('train_' + str(config['train']) + '_config_' + config['name'] + '.xlsx')
        # DataFr = pd.DataFrame(data=mydict, index=['value'])
        # DataFr.to_excel(writer, sheet_name='Result')
        # writer.close()

    else:
        print('unknown mode')
        sys.exit()

    return
Beispiel #37
0
def testMQ():
    """Compare two MaxQuant dependent-peptide (.deppep) tables.

    Asks the user (via file dialogs) for a "nomatch" and a "matching"
    deppep file, counts peptide sequences that appear in the raw files of
    one table but not the other, draws per-raw-file bar charts, and
    returns Intensity pivot tables for both inputs.

    Returns:
        tuple: ``(raw_tablea, raw_tableb)`` — pivot tables of Intensity,
        indexed by DP cluster/sequence columns, one column per raw file.
    """
    root = Tk()
    root.withdraw()  # hide the empty Tk main window; only dialogs show
    # Raw strings: the original "D:\Thomas\..." literals contain invalid
    # escape sequences such as "\T" (a SyntaxWarning on Python 3.12+).
    a = filedialog.askopenfilename(
        initialdir=r"D:\Thomas\PhytonSCripts\MQOutput",
        title="Choose a MQ nomatch.deppep file to plot",
        filetypes=(("deppep files", "*.deppep"), ("all files", "*.*")))
    a = pd.read_table(a, low_memory=False)
    a = a.drop(0).reset_index(drop=True)  # drop the first (non-data) row

    b = filedialog.askopenfilename(
        initialdir=r"D:\Thomas\PhytonSCripts\MQOutput",
        title="Choose a MQ matching.deppep file to plot",
        filetypes=(("deppep files", "*.deppep"), ("all files", "*.*")))
    b = pd.read_table(b, low_memory=False)
    b = b.drop(0).reset_index(drop=True)
    root.destroy()  # dialogs done; release the Tk root (original leaked it)

    cols = ['Raw file', 'DP Base Raw File', 'DP Proteins', 'DP Base Sequence']
    arg = a[cols]
    argb = b[cols]

    uniq = arg['DP Base Sequence'].unique()
    uniqb = argb['DP Base Sequence'].unique()
    # Sequences present in both files. np.isin replaces np.in1d, which is
    # deprecated since NumPy 2.0.
    out = uniq[np.isin(uniq, uniqb)]
    df = pd.DataFrame({'Raw Files': arg['Raw file'].unique()})
    df['counta'] = 0
    df['countb'] = 0
    count = 0
    countb = 0

    for x in range(len(out)):
        coin = arg[arg['DP Base Sequence'] == out[x]]
        coinb = argb[argb['DP Base Sequence'] == out[x]]
        coincount = np.isin(coin['Raw file'].unique(),
                            coinb['Raw file'].unique())
        coinbcount = np.isin(coinb['Raw file'].unique(),
                             coin['Raw file'].unique())
        # Count raw files that carry the sequence in one file only.
        count += int((~coincount).sum())
        countb += int((~coinbcount).sum())

        # .loc assignment replaces the original chained indexing
        # (df['counta'][mask] = ...), which triggers SettingWithCopyWarning
        # and stops working under pandas copy-on-write.
        for y in coin['Raw file'].unique()[~coincount]:
            df.loc[df['Raw Files'] == y, 'counta'] += 1
        for z in coinb['Raw file'].unique()[~coinbcount]:
            df.loc[df['Raw Files'] == z, 'countb'] += 1
    df.plot.bar()

    print('Amount of peptide sequences found in the Raw Files of file 1 but not'
          ' found in the Raw Files of file 2 is ', count, '. For file 2: ',
          countb)
    print('!!!Both peptide sequences need to be present in both files!!!')
    print('Amount of dp. Peptides in file 1: ', len(a), ' in file 2: ', len(b))

    values = pd.DataFrame(a['Raw file'].value_counts())
    # NOTE(review): assumes both files yield the same raw-file ordering;
    # .values drops the index, so a mismatch would mis-align counts — verify.
    values['Raw'] = b['Raw file'].value_counts().values
    values.columns = ['No match', 'Matching']
    values = values.sort_index()
    values.plot.bar()

    a['Intensity'] = a['Intensity'].astype(float)
    b['Intensity'] = b['Intensity'].astype(float)

    idx_cols = ['DP Cluster Index', 'DP AA', 'DP Base Sequence',
                'DP Probabilities']
    raw_tablea = pd.pivot_table(a, values='Intensity', index=idx_cols,
                                columns='Raw file')
    raw_tableb = pd.pivot_table(b, values='Intensity', index=idx_cols,
                                columns='Raw file')

    raw_tablea = raw_tablea.reset_index()
    raw_tableb = raw_tableb.reset_index()

    return raw_tablea, raw_tableb
# --- Snake DQN evaluation: script-level setup --------------------------------
from tensorflow import keras
from tensorflow.keras.models import load_model
from snake import *
import numpy as np
from matplotlib import pyplot as plt
import os
import random
from collections import deque
from tkinter import Tk, filedialog
# Hidden Tk root so only the "open file" dialog is shown.
tk_root = Tk()
tk_root.withdraw()
dir_name = filedialog.askopenfilename()  # path to a saved Keras model file
main_nn = load_model(dir_name)
# First five underscore-separated tokens of the file name, used as a label.
name = "_".join(dir_name.split('/')[-1].split('_')[0:5])

# Hyperparameters
LEARNING_RATE = 0.001
DISCOUNT_RATE = 0.9
EPOCH = 1
BATCH_SIZE = 100
MAX_SCORE = 40
REPLAY_BUFFER = deque(maxlen=1000)
SAMPLES = []

# CPU only
# NOTE(review): CUDA_VISIBLE_DEVICES is set *after* TensorFlow is imported
# and the model loaded, so it may have no effect — confirm / move earlier.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

game = Snake()  # Snake presumably comes from the star-import of `snake`
show_every = 5
episode = 1
history = {'reward': []}
Beispiel #39
0
    def Stop(self, event):
        """Event callback: clear ``self.flag`` to signal a stop request."""
        # presumably polled by a running loop elsewhere in the class — confirm
        self.flag = False


if __name__ == '__main__':
    pp = pprint.PrettyPrinter(indent=4)
    # Translated: account 10034's O32 login has no permission to query
    # account 8301, so account 7043 is used to query 8301 instead.
    # account_no = '8302'
    # combi_no = '83023005'
    account_no = '8301'
    combi_no = '8301361'

    y = list()
    root = Tk()
    root.withdraw()  # hide the Tk main window (translated)

    interface = SubInterface('ufx_trading_hhk')
    interface.init()
    positions = interface.query_position(account_no, combi_no)
    interface.subscribe_knock(combi_no)
    interface.pnl_adjusted = 0
    # Restore a previously persisted PnL adjustment, if one exists.
    # NOTE(review): pickle.load on a local state file — trusted input assumed.
    if os.path.exists("pnl_adjusted.pkl"):
        with open("pnl_adjusted.pkl", 'rb') as f:
            interface.pnl_adjusted = pickle.load(f)
    interface.pos_pnl = 0

    # Prepare a matplotlib figure; this excerpt ends mid-setup.
    fig, ax = plt.subplots()
    plt.subplots_adjust(bottom=0.25)
    ax = plt.gca()
    ax.spines['left'].set_color('none')
"""Date picker dialog.
   Stand-alone example from Tk Assistant.
   stevepython.wordpress.com

requirements:
pip3 install tkcalendar
"""
from tkinter import Button, Tk, Toplevel

from tkcalendar import Calendar

# Hidden root: the calendar itself opens later in its own Toplevel.
root = Tk()
root.withdraw()  # Hide naff extra window.
root.title('Please choose a date')


def pick_date_dialog():
    """Display date picker dialog, print date selected when OK clicked."""
    def print_sel():
        """Print date selected."""
        selected_date = (cal.get_date())
        print(selected_date)

    # Calendar lives in its own Toplevel above the hidden root window.
    top = Toplevel(root)
    # Defaults to today's date.
    cal = Calendar(top,
                   font='Arial 10',
                   background='darkblue',
                   foreground='white',
                   selectmode='day')
    # NOTE(review): print_sel is never wired to any widget in this excerpt —
    # an OK Button invoking it appears to be missing; confirm against the
    # original script.
Beispiel #41
0
def dir_picker() -> str:
	"""Ask the user to choose a directory and return its path.

	Returns an empty string if the dialog is cancelled.
	"""
	root = Tk()
	root.withdraw()  # hide the empty main window; show only the dialog
	try:
		return filedialog.askdirectory()
	finally:
		root.destroy()  # release the Tk root (the original leaked it)
Beispiel #42
0
            pyautogui.press('\t')

        if '6' in customer['foodOption']:
            pyautogui.typewrite(['space', '\t'])
        else:
            pyautogui.press('\t')

        if '7' in customer['foodOption']:
            pyautogui.typewrite(['space', '\t'])
        else:
            pyautogui.press('\t')

        # Fill out 禁忌 field.
        # pyautogui.typewrite(customer['taboos'] + '\t', 0.25)
        r = Tk()
        r.withdraw()
        r.clipboard_clear()
        r.clipboard_append(customer['taboos'])
        r.update()  # now it stays on the clipboard after the window is closed
        # print(r.clipboard_get())
        time.sleep(1)
        pyautogui.hotkey('command', 'v')
        r.destroy()

        # Fill out 手机号码 field.
        pyautogui.typewrite(customer['contact'] + '\t', 0.25)

        # Click Submit.
        pyautogui.press('enter')

    elif customer['attend'] == '2':
def file_save():
    """Prompt for a "save as" path with a topmost dialog and return it.

    Returns an empty string if the user cancels the dialog.
    """
    root = Tk()
    root.withdraw()
    root.attributes("-topmost", True)  # keep the dialog above other windows
    try:
        # filetypes must be a sequence of (label, pattern) pairs; the
        # original passed one flat tuple, which tkinter rejects.
        return filedialog.asksaveasfilename(title="Select file",
                                            filetypes=(("all files", "*.*"),))
    finally:
        root.destroy()  # release the hidden Tk root (the original leaked it)
Beispiel #44
0
	def get_clipboard_text(self):
		"""Return the current text content of the system clipboard."""
		# Hidden Tk root created solely to access the clipboard.
		root = Tk()
		root.withdraw()
		try:
			return root.clipboard_get()
		finally:
			# Destroy the root so repeated calls do not leak Tk
			# interpreters (the original never destroyed it).
			root.destroy()
Beispiel #45
0
def main(argv):
	config = read_parser(argv, Inputs, InputsOpt_Defaults)
	

	if config['mode'] == 'pca':

		print('Select MASTER Features xls')
		root = Tk()
		root.withdraw()
		root.update()
		filepath = filedialog.askopenfilename()
		root.destroy()
		

		mydict = pd.read_excel(filepath)
		rownames = list(mydict.index.values)
		length_data = len(rownames)
		
		
		mydict = mydict.to_dict(orient='list')
		

		
		Features = []
		for i in range(length_data):
			example = []
			for feature in config['feature_array']:

				example.append(mydict[feature][i])
			Features.append(example)
		
		Features = np.array(Features)
		# X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
		# print(X)
		# print(np.size(X))
		# print(np.array(Features))
		# print(np.size(np.array(Features)))
		# sys.exit()
		
		# plt.plot(Features)
		# plt.show()
		
		scaler_model = StandardScaler()
		scaler_model.fit(Features)
		Features = scaler_model.transform(Features)
		
		# plt.plot(Features)
		# plt.show()
		
		
		
		pca_model = PCA(n_components=4)
		# pca_model = KernelPCA(n_components=4, kernel='cosine')
		pca_model.fit(Features)
		
		# print(pca_model)
		print(pca_model.explained_variance_)
		new = pca_model.transform(Features)
		
		plt.plot(new)
		plt.show()
	
	
	elif config['mode'] == 'dbn_fuse':

		print('Select MASTER Features xls')
		root = Tk()
		root.withdraw()
		root.update()
		filepath = filedialog.askopenfilename()
		root.destroy()		

		mydict = pd.read_excel(filepath)
		rownames = list(mydict.index.values)
		length_data = len(rownames)
		length_train = int(length_data*config['train'])		
		
		mydict = mydict.to_dict(orient='list')		

		Features_Train = []
		Features_Test = []
		Targets_Train = []
		Targets_Test = []
		for i in range(length_data):			
			example = [1]
			for feature in config['feature_array']:
				# example.append(mydict[feature][i])
				example.append(sigmoid(mydict[feature][i]))				
				
			if i <= length_train:
				Features_Train.append(example)
				# Targets_Train.append([mydict['Target'][i]])
				Targets_Train.append([sigmoid(mydict['Target'][i])])
			else:
				Features_Test.append(example)
				# Targets_Test.append([mydict['Target'][i]])
				Targets_Test.append([sigmoid(mydict['Target'][i])])
		
		Features_Train = np.array(Features_Train)
		Features_Test = np.array(Features_Test)
		
		Targets_Train = np.array(Targets_Train)
		Targets_Test = np.array(Targets_Test)
		
		
		# scaler_model = StandardScaler()
		# scaler_model.fit(Features_Train)
		# Features_Train = scaler_model.transform(Features_Train)
		# Features_Test = scaler_model.transform(Features_Test)
		
		# scaler_target = StandardScaler()
		# scaler_target.fit(Targets_Train)
		# Targets_Train = scaler_target.transform(Targets_Train)
		# Targets_Test = scaler_target.transform(Targets_Test)
		
		# Features_Train = sigmoid(Features_Train)
		# Features_Test = sigmoid(Features_Test)
		# Targets_Train = sigmoid(Targets_Train)
		# Targets_Test = sigmoid(Targets_Test)
		
		
		
		
		rbm = BernoulliRBM(n_components=8, random_state=0, verbose=True, learning_rate=0.6, n_iter=20)
		regressor = MLPRegressor(hidden_layer_sizes=[8] ,random_state=11, verbose=True, activation='identity', alpha=0.01)	

		
		DBN = Pipeline(steps=[('rbm', rbm), ('regressor', regressor)])
		
		DBN.fit(Features_Train, Targets_Train)
		
		Targets_Predict = DBN.predict(Features_Test)
		
		fig, ax = plt.subplots()
		ax.plot(Targets_Test, 'bo')
		ax.plot(Targets_Predict, 'ro')
		plt.show()
	
	
	elif config['mode'] == 'dbn_fuse_2':

		print('Select MASTER Features xls')
		root = Tk()
		root.withdraw()
		root.update()
		filepath = filedialog.askopenfilename()
		root.destroy()		

		mydict = pd.read_excel(filepath)
		rownames = list(mydict.index.values)
		length_data = len(rownames)
		length_train = int(length_data*config['train'])		
		
		mydict = mydict.to_dict(orient='list')		

		Features_Train = []
		Features_Test = []
		Targets_Train = []
		Targets_Test = []
		for i in range(length_data):			
			example = []
			for feature in config['feature_array']:
				example.append(mydict[feature][i])	
				
			if i <= length_train:
				Features_Train.append(example)
				Targets_Train.append([mydict['Target'][i]])
			else:
				Features_Test.append(example)
				Targets_Test.append([mydict['Target'][i]])
		
		Features_Train = np.array(Features_Train)
		Features_Test = np.array(Features_Test)
		
		Targets_Train = np.array(Targets_Train)
		Targets_Test = np.array(Targets_Test)
		
		
		# scaler_model = StandardScaler()
		# scaler_model.fit(Features_Train)
		# Features_Train = scaler_model.transform(Features_Train)
		# Features_Test = scaler_model.transform(Features_Test)
		
		Features_Train = minmax_scale(Features_Train, feature_range=(0,1))
		Features_Test = minmax_scale(Features_Test, feature_range=(0,1))
		
		
		
		
		# scaler_target = StandardScaler()
		# scaler_target.fit(Targets_Train)
		# Targets_Train = scaler_target.transform(Targets_Train)
		# Targets_Test = scaler_target.transform(Targets_Test)
		
		Targets_Train = Targets_Train/100.
		Targets_Test = Targets_Test/100.
		
		
		# plt.plot(Features_Train)
		# plt.show()
		
		
		
		
		# rbm = BernoulliRBM(n_components=2, random_state=1, verbose=False, learning_rate=0.06, n_iter=50)
		
		# regressor = MLPRegressor(hidden_layer_sizes=[2] ,random_state=11, verbose=False, activation='identity', alpha=0.01)	
		# print(rbm.intercept_visible_)
		# print(rbm.intercept_hidden_)
		# print(rbm.components_)

		# rbm.fit(Features_Train)
		# regressor.fit(Features_Train, Targets_Train)
		# print('RBM++++++++++++')
		# print(rbm.intercept_visible_)
		# print(rbm.intercept_hidden_)
		# print(rbm.components_)
		# caca = np.array(rbm.components_)		
		# caca2 = np.transpose(caca)		
		# print(caca)
		# print(caca2)
		# print('MLP++++++++++++')
		# print(regressor.intercepts_)
		# print(regressor.coefs_)
		
		# pelo = [caca2, regressor.coefs_[1]]
		# print('aaaaaa')
		# # print(pelo)
		
		# regressor.coefs_ = pelo
		# print(regressor.coefs_)
		print('RBM FUNCTION++++++++++++')
		_STOCHASTIC_SOLVERS = ['sgd', 'adam']
		class MLPRegressorOverride(MLPRegressor):
			# Overriding _init_coef method
			# def _init_coef(self, fan_in, fan_out):
				# if self.activation == 'logistic':
					# init_bound = np.sqrt(2. / (fan_in + fan_out))
				# elif self.activation in ('identity', 'tanh', 'relu'):
					# init_bound = np.sqrt(6. / (fan_in + fan_out))
				# else:
					# raise ValueError("Unknown activation function %s" %
									 # self.activation)
				
				# # first = add_Brbm(Visible=Features_Train, components=2, rs=1, learning_rate=0.06, verbose=None, n_iter=None)		
				# # second = add_Brbm(Visible=first['hidden'], components=1, rs=5, learning_rate=0.06, verbose=None, n_iter=None)	
				# # Coefs = [first['coefs'], second['coefs']]
				# # Bias = [first['bias'], second['bias']]

				# regressor = MLPRegressor(hidden_layer_sizes=[2] ,random_state=10, verbose=False, activation='identity', alpha=0.01)	
				
				# Tra = np.array([[1., 2., 3., 4.], [4., 2.5, 3., 3.], [5., 2., 3.8, 2.], [4., 5.5, 2., 3.]])
				# Te = np.array([[1.], [1.1], [0.7], [1.9]])
				# regressor.fit(Tra, Te)
				
				# coef_init = regressor.coefs_
				# intercept_init = regressor.intercepts_

				# return coef_init, intercept_init
				
			def _initialize(self, y, layer_units):
				# set all attributes, allocate weights etc for first call
				# Initialize parameters
				self.n_iter_ = 0
				self.t_ = 0
				self.n_outputs_ = y.shape[1]

				# Compute the number of layers
				self.n_layers_ = len(layer_units)

				# Output for regression
				# if not is_classifier(self):
					# self.out_activation_ = 'identity'
				# # Output for multi class
				# elif self._label_binarizer.y_type_ == 'multiclass':
					# self.out_activation_ = 'softmax'
				# # Output for binary class and multi-label
				# else:
					# self.out_activation_ = 'logistic'
				self.out_activation_ = 'identity'
				# Initialize coefficient and intercept layers
				# self.coefs_ = []
				# self.intercepts_ = []

				# for i in range(self.n_layers_ - 1):
					# coef_init, intercept_init = self._init_coef(layer_units[i],
																# layer_units[i + 1])
					# self.coefs_.append(coef_init)
					# self.intercepts_.append(intercept_init)
				
				
				# regressor = MLPRegressor(hidden_layer_sizes=[2] ,random_state=10, verbose=False, activation='identity', alpha=0.01)					
				# Tra = np.array([[1., 2., 3., 4.], [4., 2.5, 3., 3.], [5., 2., 3.8, 2.], [4., 5.5, 2., 3.]])
				# Te = np.array([[1.], [1.1], [0.7], [1.9]])
				# regressor.fit(Tra, Te)				
				# self.coefs_ = regressor.coefs_
				# self.intercepts_ = regressor.intercepts_
				
				
				first = add_Brbm(Visible=Features_Train, components=60, rs=1, learning_rate=0.006, verbose=None, n_iter=None)	
				second = add_Brbm(Visible=first['hidden'], components=20, rs=5, learning_rate=0.006, verbose=None, n_iter=None)
				third = add_Brbm(Visible=second['hidden'], components=1, rs=7, learning_rate=0.006, verbose=None, n_iter=None)					
				Coefs = [first['coefs'], second['coefs'], third['coefs']]
				Bias = [first['bias'], second['bias'], third['bias']]
				self.coefs_ = Coefs
				self.intercepts_ = Bias
				
				
				

				if self.solver in _STOCHASTIC_SOLVERS:
					self.loss_curve_ = []
					self._no_improvement_count = 0
					if self.early_stopping:
						self.validation_scores_ = []
						self.best_validation_score_ = -np.inf
					else:
						self.best_loss_ = np.inf
					
		
		# first = add_Brbm(Visible=Features_Train, components=2, rs=1, learning_rate=0.06, verbose=None, n_iter=None)		
		# second = add_Brbm(Visible=first['hidden'], components=1, rs=5, learning_rate=0.06, verbose=None, n_iter=None)		
		# Coefs = [first['coefs'], second['coefs']]
		# Bias = [first['bias'], second['bias']]
		

		# regressor = MLPRegressor(hidden_layer_sizes=[2] ,random_state=11, verbose=False, activation='identity', alpha=0.01)
		regressor = MLPRegressorOverride(hidden_layer_sizes=[60, 20] ,random_state=11, verbose=False, activation='identity', alpha=0.001)	
		
		
		# regressor.coefs_ = Coefs
		# regressor.intercepts_ = Bias
		
		regressor.partial_fit(Features_Train, Targets_Train)
		
		# print(regressor.coefs_)
		# print(regressor.intercepts_)
		
		# print(Coefs)
		# print(Bias)
		
		
		Targets_Predict = regressor.predict(Features_Test)
		
		
		fig, ax = plt.subplots()
		ax.plot(Targets_Test, 'bo')
		ax.plot(Targets_Predict, 'ro')
		plt.show()
		
		
		sys.exit()
		
		Targets_Predict = rbm.predict(Features_Test)
		
		
	
	elif config['mode'] == 'dbn_fuse_3':
		

		class RBM:
		  
			def __init__(self, num_visible, num_hidden):
				self.num_hidden = num_hidden
				self.num_visible = num_visible
				self.debug_print = True

				# Initialize a weight matrix, of dimensions (num_visible x num_hidden), using
				# a uniform distribution between -sqrt(6. / (num_hidden + num_visible))
				# and sqrt(6. / (num_hidden + num_visible)). One could vary the 
				# standard deviation by multiplying the interval with appropriate value.
				# Here we initialize the weights with mean 0 and standard deviation 0.1. 
				# Reference: Understanding the difficulty of training deep feedforward 
				# neural networks by Xavier Glorot and Yoshua Bengio
				np_rng = np.random.RandomState(1234)

				self.weights = np.asarray(np_rng.uniform(
						low=-0.1 * np.sqrt(6. / (num_hidden + num_visible)),
									high=0.1 * np.sqrt(6. / (num_hidden + num_visible)),
									size=(num_visible, num_hidden)))


				# Insert weights for the bias units into the first row and first column.
				self.weights = np.insert(self.weights, 0, 0, axis = 0)
				self.weights = np.insert(self.weights, 0, 0, axis = 1)

			def train(self, data, max_epochs = 1000, learning_rate = 0.1):
				"""
				Train the machine.
				Parameters
				----------
				data: A matrix where each row is a training example consisting of the states of visible units.    
				"""

				num_examples = data.shape[0]

				# Insert bias units of 1 into the first column.
				data = np.insert(data, 0, 1, axis = 1)

				for epoch in range(max_epochs):      
					# Clamp to the data and sample from the hidden units. 
					# (This is the "positive CD phase", aka the reality phase.)
					pos_hidden_activations = np.dot(data, self.weights)      
					pos_hidden_probs = self._logistic(pos_hidden_activations)
					pos_hidden_probs[:,0] = 1 # Fix the bias unit.
					pos_hidden_states = pos_hidden_probs > np.random.rand(num_examples, self.num_hidden + 1)
					# Note that we're using the activation *probabilities* of the hidden states, not the hidden states       
					# themselves, when computing associations. We could also use the states; see section 3 of Hinton's 
					# "A Practical Guide to Training Restricted Boltzmann Machines" for more.
					pos_associations = np.dot(data.T, pos_hidden_probs)

					# Reconstruct the visible units and sample again from the hidden units.
					# (This is the "negative CD phase", aka the daydreaming phase.)
					neg_visible_activations = np.dot(pos_hidden_states, self.weights.T)
					neg_visible_probs = self._logistic(neg_visible_activations)
					neg_visible_probs[:,0] = 1 # Fix the bias unit.
					neg_hidden_activations = np.dot(neg_visible_probs, self.weights)
					neg_hidden_probs = self._logistic(neg_hidden_activations)
					# Note, again, that we're using the activation *probabilities* when computing associations, not the states 
					# themselves.
					neg_associations = np.dot(neg_visible_probs.T, neg_hidden_probs)

					# Update weights.
					self.weights += learning_rate * ((pos_associations - neg_associations) / num_examples)

					error = np.sum((data - neg_visible_probs) ** 2)
					if self.debug_print:
						print("Epoch %s: error is %s" % (epoch, error))

			def run_visible(self, data):
				"""
				Assuming the RBM has been trained (so that weights for the network have been learned),
				run the network on a set of visible units, to get a sample of the hidden units.
				
				Parameters
				----------
				data: A matrix where each row consists of the states of the visible units.
				
				Returns
				-------
				hidden_states: A matrix where each row consists of the hidden units activated from the visible
				units in the data matrix passed in.
				"""
				
				num_examples = data.shape[0]
				
				# Create a matrix, where each row is to be the hidden units (plus a bias unit)
				# sampled from a training example.
				hidden_states = np.ones((num_examples, self.num_hidden + 1))
				
				# Insert bias units of 1 into the first column of data.
				data = np.insert(data, 0, 1, axis = 1)

				# Calculate the activations of the hidden units.
				hidden_activations = np.dot(data, self.weights)
				# Calculate the probabilities of turning the hidden units on.
				hidden_probs = self._logistic(hidden_activations)
				# Turn the hidden units on with their specified probabilities.
				hidden_states[:,:] = hidden_probs > np.random.rand(num_examples, self.num_hidden + 1)
				# Always fix the bias unit to 1.
				# hidden_states[:,0] = 1
			  
				# Ignore the bias units.
				hidden_states = hidden_states[:,1:]
				return hidden_states
			
		  # TODO: Remove the code duplication between this method and `run_visible`?
			def run_hidden(self, data):
				"""
				Assuming the RBM has been trained (so that weights for the network have been learned),
				run the network on a set of hidden units, to get a sample of the visible units.
				Parameters
				----------
				data: A matrix where each row consists of the states of the hidden units.
				Returns
				-------
				visible_states: A matrix where each row consists of the visible units activated from the hidden
				units in the data matrix passed in.
				"""

				num_examples = data.shape[0]

				# Create a matrix, where each row is to be the visible units (plus a bias unit)
				# sampled from a training example.
				visible_states = np.ones((num_examples, self.num_visible + 1))

				# Insert bias units of 1 into the first column of data.
				data = np.insert(data, 0, 1, axis = 1)

				# Calculate the activations of the visible units.
				visible_activations = np.dot(data, self.weights.T)
				# Calculate the probabilities of turning the visible units on.
				visible_probs = self._logistic(visible_activations)
				# Turn the visible units on with their specified probabilities.
				visible_states[:,:] = visible_probs > np.random.rand(num_examples, self.num_visible + 1)
				# Always fix the bias unit to 1.
				# visible_states[:,0] = 1

				# Ignore the bias units.
				visible_states = visible_states[:,1:]
				return visible_states
			
			def daydream(self, num_samples):
				"""
				Randomly initialize the visible units once, and start running alternating Gibbs sampling steps
				(where each step consists of updating all the hidden units, and then updating all of the visible units),
				taking a sample of the visible units at each step.
				Note that we only initialize the network *once*, so these samples are correlated.
				Returns
				-------
				samples: A matrix, where each row is a sample of the visible units produced while the network was
				daydreaming.
				"""

				# Create a matrix, where each row is to be a sample of of the visible units 
				# (with an extra bias unit), initialized to all ones.
				samples = np.ones((num_samples, self.num_visible + 1))

				# Take the first sample from a uniform distribution.
				samples[0,1:] = np.random.rand(self.num_visible)

				# Start the alternating Gibbs sampling.
				# Note that we keep the hidden units binary states, but leave the
				# visible units as real probabilities. See section 3 of Hinton's
				# "A Practical Guide to Training Restricted Boltzmann Machines"
				# for more on why.
				for i in range(1, num_samples):
					visible = samples[i-1,:]

					# Calculate the activations of the hidden units.
					hidden_activations = np.dot(visible, self.weights)      
					# Calculate the probabilities of turning the hidden units on.
					hidden_probs = self._logistic(hidden_activations)
					# Turn the hidden units on with their specified probabilities.
					hidden_states = hidden_probs > np.random.rand(self.num_hidden + 1)
					# Always fix the bias unit to 1.
					hidden_states[0] = 1

					# Recalculate the probabilities that the visible units are on.
					visible_activations = np.dot(hidden_states, self.weights.T)
					visible_probs = self._logistic(visible_activations)
					visible_states = visible_probs > np.random.rand(self.num_visible + 1)
					samples[i,:] = visible_states

				# Ignore the bias units (the first column), since they're always set to 1.
				return samples[:,1:]        
			  
			def _logistic(self, x):
				return 1.0 / (1 + np.exp(-x))

		r = RBM(num_visible = 6, num_hidden = 6)
		# training_data = np.array([[1,1,1,0,0,0],[1,0,1,0,0,0],[1,1,1,0,0,0],[0,0,1,1,1,0], [0,0,1,1,0,0],[0,0,1,1,1,0]])
		training_data = np.array([[1.1,1,1,0,0,0],[1.3,0,1,0,0,0],[10.,1,1,0,0,0],[0,0,1,1,1,0], [0,0,1,1,0,0],[0,0,1,1,1,0]])
		# training_data = np.array([[10.,11.,13.,0.9,0.,0.4],[21.,10.,31.,0.,0.9,0.],[1.,16.,1.,0.4,0.6,0.6],[0.3,0.,16.,1.,11.,0.], [0.,0.8,1.,1.,0.6,0.7],[10.,16.,15.,1.,1.,0.6]])
		r.train(training_data, max_epochs = 5000)
		print(r.weights)
		user = np.array([[0.,1.,0.,1.,0.1,0.8]])
		print(r.run_visible(user))
		
	
	
	elif config['mode'] == 'dbn_fuse_4':
		print('Select MASTER Features xls')
		root = Tk()
		root.withdraw()
		root.update()
		filepath = filedialog.askopenfilename()
		root.destroy()		

		mydict = pd.read_excel(filepath)
		rownames = list(mydict.index.values)
		length_data = len(rownames)
		length_train = int(length_data*config['train'])		
		
		mydict = mydict.to_dict(orient='list')		

		Features_Train = []
		Features_Test = []
		Targets_Train = []
		Targets_Test = []
		Features = []
		for i in range(length_data):			
			example = [0]
			for feature in config['feature_array']:
				example.append(mydict[feature][i])
			Features.append(example)
			if i <= length_train:
				Features_Train.append(example)
				Targets_Train.append([mydict['Target'][i]])
			else:
				Features_Test.append(example)
				Targets_Test.append([mydict['Target'][i]])
		
		
		Features_Train = np.array(Features_Train)
		Features_Test = np.array(Features_Test)
		Features = np.array(Features)
		
		Targets_Train = np.array(Targets_Train)
		Targets_Test = np.array(Targets_Test)
		
		
		scaler_model = StandardScaler()
		scaler_model.fit(Features_Train)
		Features_Train = scaler_model.transform(Features_Train)
		Features_Test = scaler_model.transform(Features_Test)
		
		scaler_target = StandardScaler()
		scaler_target.fit(Targets_Train)
		Targets_Train = scaler_target.transform(Targets_Train)
		Targets_Test = scaler_target.transform(Targets_Test)
		
		
		Targets = np.array(list(Targets_Train) + list(Targets_Test))
		
		
		pca_model = PCA(n_components=6)
		pca_model.fit(Features_Train)
		

		Features_Train = pca_model.transform(Features_Train)
		Features_Test = pca_model.transform(Features_Test)
		Features = pca_model.transform(Features)
		
		# print(np.ravel(Targets_Train))
		# plt.plot(Targets_Train)
		# plt.show()
		corr = []
		TFeatures_Train = np.transpose(Features_Train)
		TFeatures = np.transpose(Features)
		for feature_pca in TFeatures_Train:

			corr.append(np.corrcoef(np.ravel(feature_pca), np.ravel(Targets_Train))[0][1])		
		print(corr)
		
		plt.plot(Features_Train)
		plt.show()
		
		plt.plot(TFeatures_Train[np.argmax(np.absolute(corr))])
		plt.show()
		
		
		

		# regressor1 = MLPRegressor(hidden_layer_sizes=[3] ,random_state=11, verbose=False, activation='identity', alpha=0.01)
		# # regressor1 = NuSVR(kernel='linear', nu=0.001)
		# # regressor1 = tree.DecisionTreeRegressor()
		
		# # regressor1 = GaussianNB()		
		# regressor1.fit(Features_Train, Targets_Train)		
		# Targets_Predict = regressor1.predict(Features_Test)		
		
		
		# fig, ax = plt.subplots()
		# ax.plot(Targets_Test, 'bo')
		# ax.plot(Targets_Predict, 'ro')
		# plt.show()
		
		
		Feature = TFeatures[np.argmax(np.absolute(corr))]
		plt.plot(Feature, 'm')
		plt.show()
		
		Feature = np.array(Feature)
		x_Feature = np.arange(len(Feature))
		
		Train = Feature[0:int(config['train']*len(Feature))]
		x_Train = np.arange(float(len(Train)))				
		
		x_Predict = np.linspace(len(Train), len(Feature), num=len(Feature) - len(Train), endpoint=False)
		
		
		# scaler = StandardScaler()
		# scaler = RobustScaler()
		# scaler.fit(Train)
		# Train = scaler.transform(Train)	

		clf = MLPRegressor(solver='lbfgs', alpha=1.e-1, hidden_layer_sizes=[700, 500], random_state=2, activation='identity', verbose=False)

		
		n_pre = int(0.2*len(Train))
		m_post = int(0.1*len(Train))
		n_ex = len(Train) - n_pre - m_post
		print('+++++++++++++Info: Input points n = ', n_pre)
		print('+++++++++++++Info: Output points m = ', m_post)
		print('+++++++++++++Info: Training examples = ', n_ex)
		a = input('enter to continue...')
		T_Inputs = []
		T_Outputs = []
		for k in range(n_ex + 1):

			T_Inputs.append(Train[k : k + n_pre])
			# print(Train[k : k + n_pre])
			# sys.exit()
			T_Outputs.append(Train[k + n_pre : k + n_pre + m_post])
		clf.fit(T_Inputs, T_Outputs)
		print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
		Predict = []
		It_Train = list(Train)

		for k in range(len(x_Predict) + m_post - 1):
			P_Input = It_Train[n_ex + k + 1 : n_ex + n_pre + k + 1]
			# print(P_Input)
			# sys.exit()
			P_Output = clf.predict([P_Input])
			P_Output = P_Output[0]
			
			
			Predict.append(P_Output[-1])
			It_Train.append(P_Output[-1])

		Predict = Predict[:-(m_post-1)]
	
		plt.plot(x_Feature, Feature, 'b', x_Predict, Predict, 'r', x_Train, Train, 'k')
		plt.show()
	
	
	elif config['mode'] == 'dbn_fuse_5':
	
		class MLPRegressorOverride(MLPRegressor):
			"""Experimental MLPRegressor with a custom ``_init_coef``.

			NOTE(review): this looks like debugging scaffolding --
			``caca`` is a closure variable from the enclosing function
			(assigned an int later), and the bare ``count`` read/write
			inside ``_init_coef`` will raise UnboundLocalError when the
			method runs (should be ``type(self).count``).  Confirm the
			intent before relying on this class.
			"""
		# Overriding _init_coef method
			count = 0  # class-level call counter; see NOTE above
			def _init_coef(self, fan_in, fan_out):
				"""Return (coef_init, intercept_init) for one layer."""
				# Glorot-style bound, mirroring scikit-learn internals.
				# NOTE(review): ``init_bound`` is computed but never used.
				if self.activation == 'logistic':
					init_bound = np.sqrt(2. / (fan_in + fan_out))
				elif self.activation in ('identity', 'tanh', 'relu'):
					init_bound = np.sqrt(6. / (fan_in + fan_out))
				else:
					raise ValueError("Unknown activation function %s" %
									 self.activation)
				coef_init = caca
				print(caca)
				print(count)
				print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
				intercept_init = caca
				count += 1

				return coef_init, intercept_init
	
	
		print('Select MASTER Features xls')
		root = Tk()
		root.withdraw()
		root.update()
		filepath = filedialog.askopenfilename()
		root.destroy()		

		mydict = pd.read_excel(filepath)
		rownames = list(mydict.index.values)
		length_data = len(rownames)
		length_train = int(length_data*config['train'])		
		
		mydict = mydict.to_dict(orient='list')		

		Features_Train = []
		Features_Test = []
		Targets_Train = []
		Targets_Test = []
		Features = []
		for i in range(length_data):			
			example = [0]
			for feature in config['feature_array']:
				example.append(mydict[feature][i])
			Features.append(example)
			if i <= length_train:
				Features_Train.append(example)
				Targets_Train.append([mydict['Target'][i]])
			else:
				Features_Test.append(example)
				Targets_Test.append([mydict['Target'][i]])
		
		
		Features_Train = np.array(Features_Train)
		Features_Test = np.array(Features_Test)
		Features = np.array(Features)
		
		Targets_Train = np.array(Targets_Train)
		Targets_Test = np.array(Targets_Test)
		
		
		scaler_model = StandardScaler()
		scaler_model.fit(Features_Train)
		Features_Train = scaler_model.transform(Features_Train)
		Features_Test = scaler_model.transform(Features_Test)
		
		scaler_target = StandardScaler()
		scaler_target.fit(Targets_Train)
		Targets_Train = scaler_target.transform(Targets_Train)
		Targets_Test = scaler_target.transform(Targets_Test)
		
		
		Targets = np.array(list(Targets_Train) + list(Targets_Test))
		
		
		pca_model = PCA(n_components=6)
		pca_model.fit(Features_Train)
		

		Features_Train = pca_model.transform(Features_Train)
		Features_Test = pca_model.transform(Features_Test)
		Features = pca_model.transform(Features)
		
		# print(np.ravel(Targets_Train))
		# plt.plot(Targets_Train)
		# plt.show()
		corr = []
		TFeatures_Train = np.transpose(Features_Train)
		TFeatures = np.transpose(Features)
		for feature_pca in TFeatures_Train:

			corr.append(np.corrcoef(np.ravel(feature_pca), np.ravel(Targets_Train))[0][1])		
		print(corr)
		
		plt.plot(Features_Train)
		plt.show()
		
		plt.plot(TFeatures_Train[np.argmax(np.absolute(corr))])
		plt.show()
		
		
		

		# regressor1 = MLPRegressor(hidden_layer_sizes=[3] ,random_state=11, verbose=False, activation='identity', alpha=0.01)
		# # regressor1 = NuSVR(kernel='linear', nu=0.001)
		# # regressor1 = tree.DecisionTreeRegressor()
		
		# # regressor1 = GaussianNB()		
		# regressor1.fit(Features_Train, Targets_Train)		
		# Targets_Predict = regressor1.predict(Features_Test)		
		
		
		# fig, ax = plt.subplots()
		# ax.plot(Targets_Test, 'bo')
		# ax.plot(Targets_Predict, 'ro')
		# plt.show()
		caca = 45
		
		Feature = TFeatures[np.argmax(np.absolute(corr))]
		plt.plot(Feature, 'm')
		plt.show()
		
		Feature = np.array(Feature)
		x_Feature = np.arange(len(Feature))
		
		Train = Feature[0:int(config['train']*len(Feature))]
		x_Train = np.arange(float(len(Train)))				
		
		x_Predict = np.linspace(len(Train), len(Feature), num=len(Feature) - len(Train), endpoint=False)
		
		
		# scaler = StandardScaler()
		# scaler = RobustScaler()
		# scaler.fit(Train)
		# Train = scaler.transform(Train)	

		clf = MLPRegressorOverride(solver='lbfgs', alpha=1.e-1, hidden_layer_sizes=[700, 500], random_state=2, activation='identity', verbose=False)

		
		n_pre = int(0.2*len(Train))
		m_post = int(0.1*len(Train))
		n_ex = len(Train) - n_pre - m_post
		print('+++++++++++++Info: Input points n = ', n_pre)
		print('+++++++++++++Info: Output points m = ', m_post)
		print('+++++++++++++Info: Training examples = ', n_ex)
		a = input('enter to continue...')
		T_Inputs = []
		T_Outputs = []
		for k in range(n_ex + 1):

			T_Inputs.append(Train[k : k + n_pre])
			# print(Train[k : k + n_pre])
			# sys.exit()
			T_Outputs.append(Train[k + n_pre : k + n_pre + m_post])
		clf.fit(T_Inputs, T_Outputs)
		print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n')
		Predict = []
		It_Train = list(Train)

		for k in range(len(x_Predict) + m_post - 1):
			P_Input = It_Train[n_ex + k + 1 : n_ex + n_pre + k + 1]
			# print(P_Input)
			# sys.exit()
			P_Output = clf.predict([P_Input])
			P_Output = P_Output[0]
			
			
			Predict.append(P_Output[-1])
			It_Train.append(P_Output[-1])

		Predict = Predict[:-(m_post-1)]
	
		plt.plot(x_Feature, Feature, 'b', x_Predict, Predict, 'r', x_Train, Train, 'k')
		plt.show()
		
		
		
		
	
	else:
		print('unknown mode')
		sys.exit()

		
	return
    def lyrics_to_pdf(self):
        """Fetch lyrics for the entered artist/song and save them as a PDF.

        Reads song and artist names from the UI, downloads the lyrics from
        api.lyrics.ovh, writes them to ``<dir>/<song>.txt`` in a
        user-chosen directory, then renders that text file into
        ``<song>.pdf``.  Shows an MDDialog on blank input, unknown song,
        or any fetch/write failure.
        """
        self.lyric_downloaded = False
        self.song = self.screen.ids.songname.text
        self.artist = self.screen.ids.artistname.text

        # Blank input: tell the user and bail out.
        if self.song == '' or self.artist == '':
            close_btn = MDRaisedButton(text='Close',
                                       on_release=self.check_for_blank_close)
            self.blank_check_dialogue = MDDialog(title='Please enter values',
                                                 buttons=[close_btn])
            self.blank_check_dialogue.open()
            return

        artist_name = self.screen.ids.artistname.text
        song_name = self.screen.ids.songname.text

        url = 'https://api.lyrics.ovh/v1/' + artist_name + '/' + song_name

        # Fetch lyrics.
        try:
            response = requests.get(url)
            json_data = json.loads(response.content)
            lyrics = json_data.get('lyrics', None)

            if lyrics is None:
                close_btn = MDRaisedButton(
                    text='Close', on_release=self.check_for_blank_close)
                self.blank_check_dialogue = MDDialog(
                    title='Song not found',
                    text='Try checking the spelling of the entered quantities',
                    buttons=[close_btn])
                self.blank_check_dialogue.open()
                return

            # Ask where to save; hide and always destroy the Tk root so
            # no invisible window is leaked (the original never did).
            root = Tk()
            root.withdraw()
            try:
                target_dir = askdirectory()
            finally:
                root.destroy()
            print(target_dir)
            if not target_dir:
                # User cancelled the directory picker.
                return

            text_file_location = f'{target_dir}/{song_name}.txt'
            # ``with`` guarantees the handle is closed even on error.
            with open(text_file_location, 'w+') as lyric_file:
                lyric_file.write(lyrics)

            os.chdir(target_dir)

            # Build the PDF from the saved text file.
            pdf = FPDF()
            pdf.add_page()
            pdf.set_font("Arial", size=12)
            with open(text_file_location, "r") as lyric_file:
                for line in lyric_file:
                    pdf.cell(200, 10, txt=line, ln=1, align='C')

            # save the pdf with name .pdf
            pdf.output(f'{song_name}.pdf')

            self.lyric_downloaded = True
            self.lyric_downloaded_popup()

        except Exception:
            # Best effort: any failure (network, filesystem, PDF) is
            # reported via the same dialog as the original bare except,
            # but without swallowing SystemExit/KeyboardInterrupt.
            close_btn = MDRaisedButton(
                text='Close', on_release=self.check_for_blank_close)
            self.blank_check_dialogue = MDDialog(
                title='Error',
                text='Please check your internet connection',
                buttons=[close_btn])
            self.blank_check_dialogue.open()
Beispiel #47
0
from tkinter import messagebox, simpledialog, Tk
import random

# Create an if-main code block, *hint, type main then ctrl+space to auto-complete
if __name__ == '__main__':

    # Hidden root window so the dialogs can appear without a blank main window.
    w = Tk()
    w.withdraw()
    # 1. Pick a random number in [0, 3] that selects the reply below.
    n = random.randint(0, 3)
    # 2. Print the number to the console (handy while debugging).
    print(n)
    # 3. Ask the user for something they think is awesome and KEEP the answer.
    # The original discarded askstring's return value, so the replies below
    # could never mention what was entered (contradicting steps 4-6).
    # askstring returns None when the dialog is cancelled.
    answer = simpledialog.askstring(title="", prompt="what do you think is awesome?")
    if answer is None:
        answer = "that"
    # 4. If the number is 0
    # -- tell the user whatever they entered is awesome!
    if n == 0:
        messagebox.showinfo(message="{} is awesome!".format(answer))

    # 5. If the number is 1
    # -- tell the user whatever they entered is ok.
    elif n == 1:
        messagebox.showinfo(message="{} is ok.".format(answer))

    # 6. If the number is 2
    # -- tell the user whatever they entered is boring.
    elif n == 2:
        messagebox.showinfo(message="{} is boring.".format(answer))
    # 7. If your variable is  3
Beispiel #48
0
    print('The NCBI API is not available now.')
    os._exit(0)

# Check the XML content for citation data
# (no 'LinkSetDb' element means NCBI returned no citing articles).
if 'LinkSetDb' not in citation_XML.text:
    print('Unable to find citation data for the specific PMID.')
    os._exit(0)

# Fetch the 'csrftoken'
# NOTE(review): the token is read from the 'pm-csrf' cookie that the PubMed
# site sets on the first GET — confirm the cookie name is still current.
s = requests.Session()
s.get(query_URI % PMID)
csrftoken = s.cookies['pm-csrf']

# Prompt for the filename
# Hidden Tk root so only the save dialog is shown.
Tk_root = Tk()
Tk_root.withdraw()
# Maps the chosen (pseudo-)extension to PubMed's export "format" parameter.
# The '~'-prefixed keys are synthetic extensions used only to pick a format.
format_mapping = {
    'nbib': 'pubmed',
    'csv': 'csv',
    'txt': 'summary-text',
    '~pmid': 'pmid',
    '~abs': 'abstract',
    '~sum': 'summary-text'
}
# File-type choices offered by the save dialog; tuples allow two patterns.
file_types = [('NBIB Formatted File (PubMed) (*.nbib)', '*.nbib'),
              ('Comma Separated Values (CSV) File (*.csv)', '*.csv'),
              ('Text File (PMIDs) (*.txt)', ('*.~pmid', '*.txt')),
              ('Text File (Summary) (*.txt)', ('*.~sum', '*.txt')),
              ('Text File (Abstract) (*.txt)', ('*.~abs', '*.txt')),
              ('All Files (*.*)', '*.*')]
# Suggested default filename, e.g. "pubmed-12345678.nbib".
default_name = "pubmed-%i.nbib" % PMID
Beispiel #49
0
def get_file():
    """Show a top-most file-open dialog and return the selected path.

    Returns an empty string if the user cancels the dialog.
    """
    dialog_root = Tk()
    dialog_root.withdraw()
    # Keep the dialog above all other windows.
    dialog_root.wm_attributes('-topmost', 1)
    chosen_path = askopenfilename(parent=dialog_root)
    return chosen_path
        "А0", "А1", "А2", "А3", "А4", "Масштаб"
    ]

    # Заполняем таблицу
    for i, row in enumerate(result):
        sheet.Cells(i + 2, 1).value = row['Filename']
        sheet.Cells(i + 2, 2).value = row['Designer']
        sheet.Cells(i + 2, 3).value = row['CountDim']
        sheet.Cells(i + 2, 4).value = row['CountTD']
        sheet.Cells(i + 2, 5).value = row['A0']
        sheet.Cells(i + 2, 6).value = row['A1']
        sheet.Cells(i + 2, 7).value = row['A2']
        sheet.Cells(i + 2, 8).value = row['A3']
        sheet.Cells(i + 2, 9).value = row['A4']
        sheet.Cells(i + 2, 10).value = "".join(('="', row['Scale'], '"'))


if __name__ == "__main__":
    root = Tk()
    root.withdraw()  # Hide the main window so only the file-picker dialog shows

    # Ask the user to pick the part drawings (KOMPAS-3D .cdw files).
    filenames = askopenfilenames(title="Выберети чертежи деталей",
                                 filetypes=[
                                     ('Компас 3D', '*.cdw'),
                                 ])

    # Parse the selected drawings and dump the results into Excel.
    print_to_excel(parse_design_documents(filenames))

    root.destroy()  # Tear down the hidden root window
    root.mainloop()  # NOTE(review): mainloop() after destroy() looks redundant — confirm it can be removed
Beispiel #51
0
                         end_row=end_row,
                         begin_col=begin_col,
                         temp_file=queryfile + ".temp")
        except Exception as e:
            q.log(e)
            return q.query_row
        finally:
            wb.save(filename=queryfile)
        return 0


if __name__ == "__main__":
    # Make the bundled webdriver directory visible on PATH.
    os.environ['Path'] = os.environ['Path'] + os.path.pathsep + ".\\driver"

    # Hidden Tk root so the dialogs below have a parent without a main window.
    tk = Tk()
    tk.withdraw()

    # Each parameter is taken from argv when supplied, otherwise prompted
    # for via a Tk dialog (dialog titles/prompts are in Chinese):
    # query file path, first row, last row (-1 = last), first column.
    filename = sys.argv[1] if len(
        sys.argv) > 1 else filedialog.askopenfilename(
            title=u'选择查询文件', initialdir=(os.path.expanduser('.')))
    begin_row = sys.argv[2] if len(sys.argv) > 2 else simpledialog.askinteger(
        title=u'起始行数', prompt=u'请输入起始行数', initialvalue=1)

    end_row = sys.argv[3] if len(sys.argv) > 3 else simpledialog.askinteger(
        title=u'终止行数', prompt=u'请输入终止行数,-1表示最后一行', initialvalue=-1)
    begin_col = sys.argv[4] if len(sys.argv) > 4 else simpledialog.askinteger(
        title=u'起始列数', prompt=u'请输入起始列数', initialvalue=1)

    # Normalize to ints (argv values arrive as strings).
    query_row = int(begin_row)
    end_row = int(end_row)
    begin_col = int(begin_col)
Beispiel #52
0
def stitch_s_parameter_measurements():
    """Interactively stitch a directory of .s2p files into one .ntwk file.

    Prompts the user (Tkinter dialogs plus stdin) for:
      * the directory holding the numbered .s2p measurement files,
      * a save name for the stitched network (must not contain '.'),
      * the directory to write the resulting .ntwk file to.

    Side effects only: reads the .s2p files, writes one .ntwk file and
    prints progress to stdout.  Relies on the module-level helper
    ``stitch_s2p_list`` and the ``rf`` (scikit-rf) import.
    """
    # Create and ready Tkinter object for GUI creation.
    root = Tk()
    root.withdraw()

    save_name = ""

    print("Please select directory that holds s2p files to be stitched.\n")

    time.sleep(1)

    # Prompts user to select directory using Tkinter.
    # Updates Tkinter object.
    directory = askdirectory()
    root.update()

    time.sleep(1)

    # Prompts user for a file name for new stitched file.
    # Ensures filename is valid (no ".").
    print("Please enter name for .ntwk file (Cannot contain \".\")")
    input_success = False
    while not input_success:
        save_name = input()

        if save_name.find(".") == -1:
            input_success = True

        else:
            print("\nInvalid save name. Save name cannot contain \".\"")

    time.sleep(1)

    print("\nPlease select a directory to save .ntwk file.\n")

    time.sleep(1)

    # Prompts user to select directory using Tkinter.
    # Updates and destroys Tkinter object.
    save_directory = askdirectory()
    root.update()

    time.sleep(1)

    root.destroy()

    # Creates a list of s2p files from directory.
    # Pre-sized with None placeholders so files can be slotted by number.
    s2p_list = []
    for _ in os.listdir(directory):
        s2p_list.append(None)

    # Sorts all files in directory by CalState number.
    # NOTE(review): assumes every file is named "..._<n>.ext" with n in
    # 1..len(files); a stray file here raises or leaves a None slot — confirm.
    for s2p_file in os.listdir(directory):
        s2p_number = int(s2p_file[s2p_file.rfind("_") + 1:s2p_file.rfind(".")])
        s2p_list[s2p_number - 1] = rf.Network(directory + "/" + s2p_file)

    # Stitches s2p list to a .ntwk file and writes it in save_directory with name "save_name".
    stitch_s2p_list(s2p_list)[0].write(save_directory + "/" + save_name)

    print("File successfully saved as \"" + save_directory + "/" + save_name +
          "\"!")
Beispiel #53
0
def location_inventory_update(wb):
    """Append a 'Location Inventory' worksheet to *wb* from the shared-drive file.

    Finds the single 'Inventory By Location' workbook in the Financial
    Package folder, asks the user to confirm it via a Tk message box, then
    copies the location records into a new sheet and adds totals, region
    subtotals, Excel formulas, number formats and page setup.

    Parameters:
        wb: openpyxl Workbook to extend in place.

    Returns:
        [wb] on success; calls sys.exit() on any validation failure.

    Relies on module-level names: listdir, load_workbook, Tk, messagebox,
    Font, Alignment, PatternFill, PageMargins, and dcc (presumably a mapping
    of column index -> Excel column letter — defined elsewhere in the module).
    """
    wb_cur = wb
    ws_cur = wb.create_sheet('Location Inventory')
    file_path = "Z:/Accounting/Accounting/Financial Package/"
    dir_list = listdir(file_path)
    inv_file = [x for x in dir_list if 'Inventory By Location' in x]

    # Exactly one source file is expected; bail out on ambiguity.
    if len(inv_file) > 1:
        print('Multiple Inventory By Location Files, Process Cancelled!')
        sys.exit()

    # Ask the user to confirm the located file before reading it.
    msg_text = "Use Inventory File {f1}?".format(f1=inv_file)
    root = Tk()
    root.withdraw()
    answer_loc_file = messagebox.askquestion('Location File', msg_text)

    if answer_loc_file == 'no':
        print('Wrong Location File in Package Folder, Process Cancelled!')
        err_index = 1
        # NOTE(review): cont_return is assigned but unused — sys.exit() ends the run.
        cont_return = [wb_cur, err_index]
        sys.exit()

    wb_source = load_workbook(file_path + inv_file[0], read_only=True)

    #Last Worksheet
    ws_source = wb_source.worksheets[len(wb_source.worksheets) - 1]

    #Last data row
    # Scan column B for the 'TOTAL INVENTORY' marker that ends the data.
    r = 1

    while True:
        if ws_source.cell(row=r, column=2).value == 'TOTAL INVENTORY':
            break
        if r > ws_source.max_row:
            break
        r += 1

    if r > ws_source.max_row:
        print(("Location Inventory: Can't find TOTAL INVENTORY record, "
               "Process Cancelled"))
        err_index = 1
        cont_return = [wb_cur, err_index]
        sys.exit()

    row_end = r - 1

    #Header data row
    # Scan column B for the 'Location' header that starts the data.
    r = 1

    while True:
        if ws_source.cell(row=r, column=2).value == 'Location':
            break
        if r > ws_source.max_row:
            break
        r += 1

    if r > ws_source.max_row:
        print(("Location Inventory: Can't find Location column header record, "
               "Process Cancelled"))
        err_index = 1
        cont_return = [wb_cur, err_index]
        sys.exit()

    row_header = r

    # Three title lines from the top-left of the source sheet.
    titles = []
    titles.append(ws_source.cell(row=1, column=1).value)
    titles.append(ws_source.cell(row=2, column=1).value)
    titles.append(ws_source.cell(row=3, column=1).value)

    # Column headers (first six columns of the header row).
    header1 = [
        ws_source.cell(row=row_header, column=x).value for x in range(1, 7)
    ]

    #flat list of records
    records = []

    for r in range(row_header + 1, row_end + 1):
        for c in range(1, 5):
            records.append(ws_source.cell(row=r, column=c).value)

    #row list of records
    # Re-chunk the flat list into rows of four cells each.
    records2 = []

    for x in range(0, len(records), 4):
        records2.append(records[x:x + 4])

    # Fill down blank region names and normalize 'Managed' -> 'Vendor Managed'.
    for x in range(0, len(records2)):
        # NOTE(review): '== None' works here but 'is None' is the idiom.
        if records2[x][0] == None:
            records2[x][0] = records2[x - 1][0]
        if records2[x][0] == 'Managed':
            records2[x][0] = 'Vendor Managed'

    #remove blanks
    # Drop rows where both inventory amounts are zero/empty.
    records3 = [
        x for x in records2 if float(x[2] or 0) + float(x[3] or 0) != 0
    ]

    #Write to Financial Package
    # Title rows: bold, centered, merged across the six data columns.
    c1 = ws_cur.cell(row=1, column=1, value=titles[0])
    c1.font = Font(bold='true')
    c1.alignment = Alignment(horizontal='center')

    c1 = ws_cur.cell(row=2, column=1, value=titles[1])
    c1.font = Font(bold='true')
    c1.alignment = Alignment(horizontal='center')

    c1 = ws_cur.cell(row=3, column=1, value=titles[2])
    c1.font = Font(bold='true')
    c1.alignment = Alignment(horizontal='center')

    ws_cur.merge_cells(start_row=1, end_row=1, start_column=1, end_column=6)

    ws_cur.merge_cells(start_row=2, end_row=2, start_column=1, end_column=6)

    ws_cur.merge_cells(start_row=3, end_row=3, start_column=1, end_column=6)

    #Header
    r_next = 5

    for c in range(1, 7):
        c1 = ws_cur.cell(row=r_next, column=c, value=header1[c - 1])
        c1.fill = PatternFill(start_color='cdc9c9',
                              end_color='cdc9c9',
                              fill_type='solid')

    #Records
    r_next += 1
    c_next = 1

    for r in range(0, len(records3)):
        for e in range(0, len(records3[r])):
            ws_cur.cell(row=r_next, column=c_next, value=records3[r][e])
            c_next += 1
        c_next = 1
        r_next += 1

    r_next = 6

    # Column 5: change formula (current month - prior month) per data row.
    for r in range(r_next, ws_cur.max_row + 1):
        formula1 = "={col_1}{row_cur}-{col_2}{row_cur}".format(
            row_cur=r, col_1=dcc.get(4), col_2=dcc.get(3))
        ws_cur.cell(row=r, column=5, value=formula1)

    # Column 6: percent-change formula guarded against divide-by-zero.
    for r in range(r_next, ws_cur.max_row + 1):
        formula1 = ("=if({col_1}{row_cur}=0,0,"
                    "{col_2}{row_cur}/{col_1}{row_cur})").format(
                        row_cur=r, col_1=dcc.get(3), col_2=dcc.get(5))
        ws_cur.cell(row=r, column=6, value=formula1)

    #Count region records
    regions = sorted(set([x[0] for x in records3]))
    region_count = {}

    for r in regions:
        region_count[r] = [x[0] for x in records3 if x[0] == r].count(r)

    #Totals
    r_next = ws_cur.max_row + 2
    ws_cur.cell(row=r_next, column=1, value='Total')
    inv_pm = sum([x[2] for x in records3])
    ws_cur.cell(row=r_next, column=3, value=inv_pm)
    inv_cm = sum([x[3] for x in records3])
    ws_cur.cell(row=r_next, column=4, value=inv_cm)

    formula1 = "={col_1}{row_cur}-{col_2}{row_cur}".format(row_cur=r_next,
                                                           col_1=dcc.get(4),
                                                           col_2=dcc.get(3))
    ws_cur.cell(row=r_next, column=5, value=formula1)

    formula1 = ("=if({col_1}{row_cur}=0,0,"
                "{col_2}{row_cur}/{col_1}{row_cur})").format(row_cur=r_next,
                                                             col_1=dcc.get(3),
                                                             col_2=dcc.get(5))
    ws_cur.cell(row=r_next, column=6, value=formula1)

    for c in range(1, 7):
        ws_cur.cell(row=r_next, column=c).font = Font(bold=True)

    #Region Totals
    # One subtotal row per region, excluding 'Vendor Managed'.
    r_next = ws_cur.max_row + 2
    regions_supplies = [x for x in regions if x != 'Vendor Managed']

    for r in range(0, len(regions_supplies)):
        ws_cur.cell(row=r_next, column=1, value=regions_supplies[r])
        inv_pm = sum([x[2] for x in records3 if x[0] == regions_supplies[r]])
        ws_cur.cell(row=r_next, column=3, value=inv_pm)
        inv_cm = sum([x[3] for x in records3 if x[0] == regions_supplies[r]])
        ws_cur.cell(row=r_next, column=4, value=inv_cm)

        formula1 = "={col_1}{row_cur}-{col_2}{row_cur}".format(
            row_cur=r_next, col_1=dcc.get(4), col_2=dcc.get(3))
        ws_cur.cell(row=r_next, column=5, value=formula1)

        formula1 = ("=if({col_1}{row_cur}=0,0,"
                    "{col_2}{row_cur}/{col_1}{row_cur})").format(
                        row_cur=r_next, col_1=dcc.get(3), col_2=dcc.get(5))
        ws_cur.cell(row=r_next, column=6, value=formula1)

        r_next += 1

    r_next += 1
    ws_cur.cell(row=r_next, column=1, value='Total Supplies Inventory')
    inv_pm_supplies = sum([x[2] for x in records3 if x[0] != 'Vendor Managed'])
    ws_cur.cell(row=r_next, column=3, value=inv_pm_supplies)
    inv_cm_supplies = sum([x[3] for x in records3 if x[0] != 'Vendor Managed'])
    ws_cur.cell(row=r_next, column=4, value=inv_cm_supplies)

    formula1 = "={col_1}{row_cur}-{col_2}{row_cur}".format(row_cur=r_next,
                                                           col_1=dcc.get(4),
                                                           col_2=dcc.get(3))
    ws_cur.cell(row=r_next, column=5, value=formula1)

    formula1 = ("=if({col_1}{row_cur}=0,0,"
                "{col_2}{row_cur}/{col_1}{row_cur})").format(row_cur=r_next,
                                                             col_1=dcc.get(3),
                                                             col_2=dcc.get(5))
    ws_cur.cell(row=r_next, column=6, value=formula1)

    for c in range(1, 7):
        ws_cur.cell(row=r_next, column=c).font = Font(bold=True)

    #Vendor Managed Total
    r_next += 2
    ws_cur.cell(row=r_next, column=1, value='Vendor Managed')
    inv_pm_vendor = sum([x[2] for x in records3 if x[0] == 'Vendor Managed'])
    ws_cur.cell(row=r_next, column=3, value=inv_pm_vendor)
    inv_cm_vendor = sum([x[3] for x in records3 if x[0] == 'Vendor Managed'])
    ws_cur.cell(row=r_next, column=4, value=inv_cm_vendor)

    formula1 = "={col_1}{row_cur}-{col_2}{row_cur}".format(row_cur=r_next,
                                                           col_1=dcc.get(4),
                                                           col_2=dcc.get(3))
    ws_cur.cell(row=r_next, column=5, value=formula1)

    formula1 = ("=if({col_1}{row_cur}=0,0,"
                "{col_2}{row_cur}/{col_1}{row_cur})").format(row_cur=r_next,
                                                             col_1=dcc.get(3),
                                                             col_2=dcc.get(5))
    ws_cur.cell(row=r_next, column=6, value=formula1)

    for c in range(1, 7):
        ws_cur.cell(row=r_next, column=c).font = Font(bold=True)

    #Format
    # Accounting-style thousands format for the amount columns.
    format_number = '_(* #,##0_);_(* (#,##0);_(* ""-""??_);_(@_)'
    for r in range(6, ws_cur.max_row + 1):
        for c in range(3, 6):
            ws_cur.cell(row=r, column=c).number_format = format_number

    for r in range(6, ws_cur.max_row + 1):
        for c in range(6, 7):
            ws_cur.cell(row=r, column=c).number_format = '0.0%'

    for c in range(1, 7):
        ws_cur.cell(row=5, column=c).alignment = Alignment(horizontal='center')
        ws_cur.cell(row=5, column=c).font = Font(bold=True)

    #Page Setup
    ws_cur.column_dimensions[dcc.get(1)].width = 24
    ws_cur.column_dimensions[dcc.get(2)].width = 33
    ws_cur.column_dimensions[dcc.get(3)].width = 18
    ws_cur.column_dimensions[dcc.get(4)].width = 18
    ws_cur.column_dimensions[dcc.get(5)].width = 18
    ws_cur.column_dimensions[dcc.get(6)].width = 13

    ws_cur.page_setup.orientation = ws_cur.ORIENTATION_PORTRAIT
    ws_cur.page_setup.paper_size = ws_cur.PAPERSIZE_TABLOID
    ws_cur.page_setup.fitToPage = True
    ws_cur.page_setup.fitToHeight = 1
    ws_cur.page_setup.fitToWidth = 1
    ws_cur.print_options.horizontalCentered = True
    ws_cur.add_print_title(5)
    ws_cur.page_margins = PageMargins(left=.5,
                                      right=.5,
                                      top=.5,
                                      bottom=.5,
                                      footer=.5)

    #Freeze Panes
    c1 = ws_cur.cell(row=6, column=1)
    ws_cur.freeze_panes = c1

    cont_return = [wb_cur]
    return cont_return
Beispiel #54
0
def plot_s_parameter_measurements(multiple_plots, plot_settings):
    """Interactively plot S21 from .s2p measurement file(s) and save the figures.

    Parameters:
        multiple_plots: truthy to plot every file in a user-chosen directory
            (plus a combined comparison plot); falsy to plot a single file.
        plot_settings: sequence of [auto_scale_on, plot_dimensions,
            plot_x_axis_range, plot_y_axis_range] controlling the figures.

    Side effects only: Tkinter file dialogs, matplotlib figures written to a
    user-chosen save directory, progress printed to stdout.  Relies on the
    module-level helper ``sort_measurements_list`` and the ``rf``
    (scikit-rf) import.
    """
    # Create and ready Tkinter object for GUI creation.
    # Force GUI to front.
    root = Tk()
    root.lift()
    root.wm_attributes('-topmost', 1)
    root.withdraw()

    # List plot settings for easy access
    auto_scale_on = plot_settings[0]
    plot_dimensions = plot_settings[1]
    plot_x_axis_range = plot_settings[2]
    plot_y_axis_range = plot_settings[3]

    # List of .s2p measurements to plot.
    measurements_list = []

    # If there are multiple measurements to plot.
    if multiple_plots:
        print(
            "\nPlease select directory that contains S parameter measurements to plot.\n"
        )

        time.sleep(1)

        # Prompts user to select directory holding .s2p measurements to plot.
        measurement_directory = askdirectory()

        # Add all measurements to list.
        for measurement_file in os.listdir(measurement_directory):
            measurements_list.append(measurement_directory + "/" +
                                     measurement_file)

        # Sort the list by RF number.
        measurements_list = sort_measurements_list(measurements_list)

    # If there is only one measruement to plot.
    else:
        print("\nPlease select S parameter measurement to plot.\n")

        time.sleep(1)

        # Prompts user to select .s2p measurement to plot.
        measurement_file = askopenfilename()

        # Add measurement to list.
        measurements_list.append(measurement_file)

    # Update Tkinter object.
    root.update()

    time.sleep(1)

    print("Please select save location for plots.\n")

    # Prompts user to select directory to save plot(s).
    save_directory = askdirectory()

    time.sleep(1)

    print("Creating plots...\n")

    # Create plot for each .s2p measurement in list.
    for measurement_file in measurements_list:
        plt.figure(figsize=plot_dimensions)

        # Plot title is "filename" taken from "directory/filename.s2p"
        plot_title = measurement_file[measurement_file.rfind("/") +
                                      1:measurement_file.rfind(".")]
        plt.title(plot_title)

        print("Creating plot: \"" + plot_title + "\"...")

        # Uses skrf to plot .s2p measurements.
        # skrf automatically sets axis titles and legend.
        s_parameter_network = rf.Network(measurement_file)

        # Plot only S21
        s_parameter_network.plot_s_db(1, 0)

        # Plot bounds (Change at will)
        plt.xlim(plot_x_axis_range)
        plt.ylim(plot_y_axis_range)

        # If auto-scaling is turned on, auto-scale the plot.
        # This overrides the previously set axis ranges
        if auto_scale_on:
            plt.autoscale(enable=True)

        plt.grid()

        # Saves file in save_directory.
        plt.savefig(save_directory + "/" + plot_title)

        time.sleep(.5)

    # If there are multiple plots, it is convenient to create a plot to compare.
    if multiple_plots:
        print("Creating plot: \"Measurement Comparison\"...\n")
        plt.figure(figsize=plot_dimensions)
        plot_title = "Measurement Comparisons"
        plt.title(plot_title)
        plt.xlim(plot_x_axis_range)
        plt.ylim(plot_y_axis_range)

        # If auto-scaling is turned on, auto-scale the plot.
        # This overrides the previously set axis ranges
        if auto_scale_on:
            plt.autoscale(enable=True)

        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Magnitude (dB)")
        plot_legend = []

        # For each measurement, add the measurement to the comparison plot.
        for measurement_file in measurements_list:
            # Uses skrf to plot .s2p/.ntwk files.
            s_parameter_network = rf.Network(measurement_file)
            plt.plot(s_parameter_network.frequency.f,
                     s_parameter_network.s_db[:, 1, 0])

            # Labels plots by RF #.
            # NOTE(review): assumes filenames look like "..._<rf#>.s2p" — confirm.
            rf_number = measurement_file[measurement_file.rfind("_") +
                                         1:measurement_file.rfind(".")]
            plot_legend.append("RF " + rf_number)

        plt.legend(plot_legend)
        plt.grid()

        plt.savefig(save_directory + "/" + plot_title)
        time.sleep(.5)

    # Destroy Tkinter object and close all plots.
    root.destroy()
    plt.close('all')

    if multiple_plots:
        print("Plots finished!")

    else:
        print("\nPlot finished!")
 def folderWarning(self):
     """Pop up a warning dialog telling the user the folder doesn't exist."""
     popup_root = Tk()
     popup_root.withdraw()  # only the message box should be visible
     messagebox.showwarning(title='Warning', message="Folder doesn't exist")
     popup_root.destroy()
Beispiel #56
0
 def filenotfound():
     """Show an error dialog reporting that the '.mass' file is missing.

     Reads ``fname`` from the enclosing scope.
     """
     err_root = Tk()
     err_root.withdraw()  # hide the throwaway root window
     messagebox.showerror("Error", "FILE '{}.mass' NOT FOUND".format(fname))
     err_root.destroy()
Beispiel #57
0
    label1 = Label(regform, text="Enter your first and last name")
    label1.place(x=50, y=10)
    e1 = Entry(regform)
    e1.place(x=225, y=10)
    e1.focus_set()

    label2 = Label(regform, text="Enter the number of your group")
    label2.place(x=50, y=40)
    e2 = Entry(regform)
    e2.place(x=225, y=40)

    btn1 = Button(regform,
                  text="Continue",
                  width=15,
                  height=3,
                  command=lambda: gotoVar(root, regform, e1, e2))
    btn1.place(x=50, y=80)
    btn2 = Button(regform,
                  text="Exit",
                  width=15,
                  height=3,
                  command=lambda: exitAll(root))
    btn2.place(x=200, y=80)


# Create the application root, keep it hidden, and open the registration form.
app_root = Tk()
app_root.withdraw()
showReg(app_root)

# Run the Tk event loop until the application exits.
app_root.mainloop()
Beispiel #58
0
from tkinter import messagebox, simpledialog, Tk

# Create an if-main code block 
if __name__ == '__main__':

    # Make a new window variable and hide it so only the dialogs show.
    window = Tk()
    window.withdraw()

    # Ask the user for their name. askstring returns None when the dialog
    # is cancelled, which previously crashed the "Hello " + name
    # concatenations below with a TypeError — fall back to a default.
    name = simpledialog.askstring(title='Greeter', prompt="What is your name?")
    if name is None:
        name = "stranger"

    # Show the greeting in a message box.
    messagebox.showinfo(title="Message", message="Hello " + name)
    # Print the same greeting to the console.
    print("Hello " + name)
    # Demonstrate an error dialog carrying the same greeting.
    messagebox.showerror(title="Warning", message="Hello " + name)
    # Run the window's event loop.
    window.mainloop()
Beispiel #59
0
def show_error(title, msg):
    """Display *msg* in a modal error box titled *title*.

    A throwaway hidden root window is created so no main window appears,
    and it is destroyed as soon as the dialog is dismissed.
    """
    popup = Tk()
    popup.withdraw()  # hide main window
    messagebox.showerror(title, msg)
    popup.destroy()
Beispiel #60
0
        self.errFileVar.set("");
        self.courseVar.set("");
        self.courseVar2.set("");
        self.fileVar.set("");
        self.fileVar2.set("");
        self.sepVar.set(",");
        self.sepVar2.set(",");
        self.p2.lift();
        self.cancelButton.config(text = "cancel", command=exit);
        self.closeP6.place_forget();
        self.progressLabel["text"] = "Working, this might take a couple of minutes...";
        self.progress["mode"] = 'indeterminate';
        
if __name__ == "__main__":
    root = Tk();
    root.withdraw();
    
    screen_width = root.winfo_screenwidth();
    screen_height = root.winfo_screenheight();

    # calculate position x and y coordinates
    x = (screen_width/2) - (framewidth/2);
    y = (screen_height/2) - (frameheight/2);
    
    window = Toplevel(root);
    window.geometry("%dx%d+%d+%d" %(framewidth, frameheight, x, y));
    window.resizable(False, False);
    
    window.iconbitmap(resource_path("elslogo.ico"));    
    
    main = MainView(window);