Example #1
def main(file):
    # input file methods
    inputList = FileIO.input(
        file)  # List that will be used to make computations

    # Calculates and returns the overall course average grade based on this weighting scale
    # Quizzes - 20% | Homework - 30% | Exams - 50%
    def getCourseAvg(student):
        avg = ((int(student[1]) + int(student[2]) + int(student[3])) / 3) * .20
        avg += ((int(student[4]) + int(student[5]) + int(student[6]) +
                 int(student[7]) + int(student[8])) / 5) * .3
        avg += ((int(student[9]) + int(student[10])) / 2) * .5
        return round(avg, 1)

    # Returns a letter grade based on the previously calculated overall course average
    def getLetterGrade(number):
        if number >= 90:
            return 'A'
        elif 80 <= number < 90:
            return 'B'
        elif 70 <= number < 80:
            return 'C'
        elif 60 <= number < 70:
            return 'D'
        elif number < 60:
            return 'F'

    # Returns a new list populated with student name, overall course average grade, and letter grade
    def populateList(listB):

        row = []
        # add the student name
        row.append(listB[0])

        # add the overall course average
        avg = getCourseAvg(listB)
        row.append(avg)

        # add the corresponding letter grade
        row.append(getLetterGrade(avg))

        return row

    newList = []
    for student in inputList:
        newList.append(populateList(student))

    # Output file with updated course averages
    FileIO.output(newList)
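A minimal sketch of the FileIO helpers this example relies on (hypothetical; the real FileIO class is not shown). It assumes input() receives an open file whose lines hold a student name followed by ten scores, and output() writes one summary line per student:

class FileIO:
    @staticmethod
    def input(file):
        # split each non-empty line into fields: name, 3 quiz, 5 homework, 2 exam scores
        return [line.split() for line in file if line.strip()]

    @staticmethod
    def output(rows):
        # write one line per student: name, course average, letter grade
        # (the output filename is an assumption)
        with open("courseAverages.txt", "w") as out:
            for row in rows:
                out.write(" ".join(str(field) for field in row) + "\n")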
Example #2
    def compute_ev(self):
        """
        Computes the expected values of all teams participating in games in the final killersports csv and
        writes the resulting csv to disk
        """
        ev_csv_filename = \
            " ".join(
                list([
                    self.get_sport_name(),
                    EVGlobals.EV_CSV_SUFFIX,
                    "(" + ", ".join(list([str(self.get_time_period()), self.get_weight_fn_str()])) + ").csv"
                ])
            )  # specify the ev csv filename

        ev_csv = list([EVGlobals.EV_CSV_HEADER,
                       list()])  # initialize the matrix for the ev csv

        final_ks_csv = self.get_final_ks_csv(
        )  # get the final killersports csv

        for row_index in range(
                2, len(final_ks_csv),
                3):  # for every game in the final killersports csv matrix
            fav_row = list(final_ks_csv[row_index]
                           )  # get the favorite, underdog and spacer rows
            dog_row = list(final_ks_csv[row_index + 1])
            spacer_row = list(final_ks_csv[row_index + 2])

            fav_payout = float(
                fav_row[7])  # get the favorite and underdog payouts
            dog_payout = float(dog_row[7])

            fav_win_pct = float(
                fav_row[8])  # get the favorite and underdog win percentages
            dog_win_pct = float(dog_row[8])

            fav_ev = fav_payout * fav_win_pct - (
                1 - fav_win_pct
            )  # compute the favorite and underdog expected values
            dog_ev = dog_payout * dog_win_pct - (1 - dog_win_pct)

            # append the computed expected values to the end of the favorite and underdog rows
            fav_row.append(str(fav_ev))
            dog_row.append(str(dog_ev))

            ev_csv.append(
                fav_row)  # add the new favorite, underdog and spacer rows
            ev_csv.append(dog_row)
            ev_csv.append(spacer_row)

        FileIO.write_csv(ev_csv_filename,
                         ev_csv)  # write the resulting ev csv to disk
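The expected value computed above treats the payout column as profit per 1 unit staked: ev = payout * win_pct - (1 - win_pct). A standalone check with made-up numbers:

# made-up numbers, purely to illustrate the formula used in compute_ev above
fav_payout, fav_win_pct = 0.91, 0.55           # profit per unit staked, win probability
fav_ev = fav_payout * fav_win_pct - (1 - fav_win_pct)
print(round(fav_ev, 4))                        # 0.0505 -> about +5% of the stake per bet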
Example #3
def save_global_feature(sess, ops, saver, layers):
    feature_name = 'global_feature'
    file_name_vec = ['train_' + feature_name, 'test_' + feature_name]
    Files_vec = [TRAIN_FILES, TEST_FILES]
    # Restore the variables that achieve the best validation accuracy from disk.
    saver.restore(
        sess,
        os.path.join(LOG_DIR, FLAGS.model + str(NAME_MODEL) + "_model.ckpt"))
    log_string("Model restored.")
    is_training = False
    # Extract the features from training set and validation set.
    for r in range(2):
        file_name = file_name_vec[r]
        Files = Files_vec[r]
        global_feature_vec = np.array([])
        label_vec = np.array([])
        for fn in range(len(Files)):
            log_string('----' + str(fn) + '----')
            current_data, current_label = provider.loadDataFile(Files[fn])
            current_data = current_data[:, 0:NUM_POINT, :]
            current_label = np.squeeze(current_label)
            print(current_data.shape)

            file_size = current_data.shape[0]
            num_batches = file_size // BATCH_SIZE
            print(file_size)

            for batch_idx in range(num_batches):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx + 1) * BATCH_SIZE
                # Input the point cloud and labels to the graph.
                feed_dict = {
                    ops['pointclouds_pl']:
                    current_data[start_idx:end_idx, :, :],
                    ops['labels_pl']: current_label[start_idx:end_idx],
                    ops['is_training_pl']: is_training
                }
                # Extract the global features from the input batch data.
                global_feature = np.squeeze(layers[feature_name].eval(
                    feed_dict=feed_dict, session=sess))

                if label_vec.shape[0] == 0:
                    global_feature_vec = global_feature
                    label_vec = current_label[start_idx:end_idx]
                else:
                    global_feature_vec = np.concatenate(
                        [global_feature_vec, global_feature])
                    label_vec = np.concatenate(
                        [label_vec, current_label[start_idx:end_idx]])
        # Save all global features to the disk.
        FileIO.write_h5('data/extracted_feature/' + file_name + '.h5',
                        global_feature_vec, label_vec)
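FileIO.write_h5 itself is not shown; a plausible minimal version built on h5py (an assumption about the real helper) would store the two arrays as named datasets:

import h5py

def write_h5(filename, data, label):
    # assumed layout: one 'data' dataset for the features, one 'label' dataset
    with h5py.File(filename, 'w') as f:
        f.create_dataset('data', data=data)
        f.create_dataset('label', data=label)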
Example #4
File: scan.py  Project: mtstamp/stamp
    def __init__(self,
                 pileup_file,
                 sample=0,
                 name="s",
                 base_quality=20,
                 min_reads_rate=0.5,
                 min_depth=10,
                 min_depth_fwd=1,
                 min_depth_rev=1,
                 min_minor_depth=1,
                 min_minor_depth_fwd=1,
                 min_minor_depth_rev=1,
                 min_het_freq=0.01):
        """ 
		the __init__ method
		
		Arguments
		----------
		see class Attributes 
		"""
        if (isinstance(pileup_file, str)):
            self.fh = FileIO(pileup_file, "r")
            self.fh_to_close = True
        else:
            self.fh = pileup_file
            self.fh_to_close = False
        if (isinstance(name, str)):
            name = [
                name,
            ]
        if (sample is None):
            sample = range(len(name))
        elif (isinstance(sample, int)):
            sample = [
                sample,
            ]
        assert len(sample) == len(
            name), "Sample and Name should be of same length."
        self.sample = sample
        self.name = name
        self.base_quality = base_quality
        self.min_reads_rate = min_reads_rate
        self.min_depth = min_depth
        self.min_depth_fwd = min_depth_fwd
        self.min_depth_rev = min_depth_rev
        self.min_minor_depth = min_minor_depth
        self.min_minor_depth_fwd = min_minor_depth_fwd
        self.min_minor_depth_rev = min_minor_depth_rev
        self.min_het_freq = min_het_freq
Example #5
    def __init__(self,
                 target_csv,
                 webdriver_path,
                 branch_url='',
                 read_csv=False,
                 time_threshold=1,
                 verbose=True):
        """
        Constructor for the BaseVISpider class - initializes instances of the class and the attributes
        of the created object
        """
        self.target_csv = target_csv  # initialize the attributes of this object
        self.webdriver_path = webdriver_path
        self.base_url = 'http://www.vegasinsider.com/'
        self.branch_url = branch_url
        self.read_csv = read_csv
        self.time_threshold = time_threshold
        self.verbose = verbose

        # if the read_csv option is True, read the csv stored in the target_csv path
        if self.read_csv:
            self.csv_matrix = FileIO.read_csv(self.target_csv)
            # otherwise initialize a new csv matrix
        else:
            self.csv_matrix = list(
                [list(VIGlobals.OVERALL_CSV_HEADER),
                 list()])
Example #6
    def load(self, fastq_r1, fastq_r2):
        """ 
		open new R1 and R2 fastq files
		
		Arguments
		----------
		fastq_r1: the path to the R1 fastq file
		fastq_r2: the path to the R2 fastq file
		
		"""
        if (self.r1):
            self.r1.close()
        self.r1 = FileIO(fastq_r1, "r")
        if (self.r2):
            self.r2.close()
        self.r2 = FileIO(fastq_r2, "r")
Example #7
    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('File Converter')
        self.pack()

        self.fileIO = FileIO()
        self.fromFile = None
        self.toFile = None
        self.AC = AvailableConversions()
        self.conversionTable = self.AC.getConversionTable()

        self.fromFileType = DOT_HEX
        self.toFileType = DOT_MIF

        self.menuBar()

        self.mainFrame()
Example #8
    def __init__(self, sport_name, time_period, verbose=True):
        """
        Constructor for the WinPercentageCalculator class - initializes instances of the class and the
        attributes of the created object
        """
        self.sport_name = sport_name  # initialize the attributes of this object
        self.time_period = time_period
        self.verbose = verbose

        self.overall_csv = \
            FileIO.read_csv(
                self.sport_name + " " + VIGlobals.OVERALL_CSV_SUFFIX + " (" + str(self.time_period) + ").csv"
            )  # read the overall csv

        self.ks_csv = \
            FileIO.read_csv(
                self.sport_name + " " + KSGlobals.KS_CSV_SUFFIX + " (" + str(self.time_period) + ").csv"
            )  # read the ks csv containing all the spreads
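FileIO.read_csv is likewise not shown; a minimal stand-in using the standard csv module (an assumption about the real helper) could be:

import csv

def read_csv(filename):
    # return the whole file as a list of rows (each row a list of strings)
    with open(filename, newline='') as f:
        return list(csv.reader(f))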
Example #9
    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('MIF file Hex Parameters')
        self.pack()

        self.parameters = ['depth', 'width', 'address_radix', 'data_radix', 'zero_fill']

        self.fio = FIO()

        self.mainFrame()
Example #10
File: template.py  Project: Asperado/iconic
def main(argv):
  from CmdArgumentsHelper import CmdArgumentsHelper
  arg_helper = CmdArgumentsHelper()
  arg_helper.add_argument('query', 'q', 'query', 1)
  arg_helper.add_argument('root_dir', 'r', 'root', 1)
  arg_helper.add_argument('output_dir', 'o', 'outdir', 1)
  args = arg_helper.read_arguments(argv)
  print(args)

  query = args['query']
  images = gen_flickr_image_info(args['root'], query)
  image_urls = images['image_urls']
  image_ids = images['image_ids']
  downloader = ImageDownloader()
  output_dir = args['output_dir'] + query
  from FileIO import FileIO
  fileIO = FileIO()
  fileIO.create_folders(output_dir)
  downloader.download_images(image_urls, image_ids, output_dir)
Example #11
    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('MIF file Hex Parameters')
        self.pack()

        self.parameters = [
            'depth', 'width', 'address_radix', 'data_radix', 'zero_fill'
        ]

        self.fio = FIO()

        self.mainFrame()
Example #12
def job():
    perser = JsonLoader()
    apiRouteIstanbul = apiRouteForBranch('istanbul')
    apiRouteIzmir = apiRouteForBranch('izmir')
    getCurrentWeek = getCurrentDateTime('europe/istanbul').isocalendar()[1]
    getPreviousWeek = getCurrentWeek - 1
    apiRouteIstanbul = apiRouteIstanbul + str(getPreviousWeek)
    print(apiRouteIstanbul)
    apiRouteIzmir = apiRouteIzmir + str(getPreviousWeek)
    print(apiRouteIzmir)
    # Get json data from apiroute call
    perser.loadJsonData(apiRouteIstanbul)
    dataIstanbul = perser.getJsonData()

    perser.loadJsonData(apiRouteIzmir)
    dataIzmir = perser.getJsonData()

    # Create file that has to be sent as notification
    file = FileIO()
    fileIstanbul = file.createCSVFile(dataIstanbul, 'istanbul')
    # fileIzmir = file.createCSVFile(dataIzmir, 'izmir')

    # reading environment variables

    # senderEmail = os.environ['SENDER_EMAIL']
    # senderPassword = os.environ['SENDER_PASSWORD']
    # getting senderName and password
    # print(senderEmail)
    # print(senderPassword)

    msg = "The attached file has last week's food request list."
    # send email activities
    email = EmailActivity()
    email.message('*****@*****.**', "*****@*****.**",
                  "Food request list", msg)
    email.addAttachment("./files/" + fileIstanbul, fileIstanbul)
    # email.addAttachment("./files/" + fileIzmir, fileIzmir)
    email.sendMail('*****@*****.**', 'Acifl1234', 'smtp.gmail.com', 587)
Example #13
def job():
    perser = JsonLoader()
    apiRouteIstanbul = apiRouteForBranch('istanbul')
    apiRouteIzmir = apiRouteForBranch('izmir')
    getCurrentWeek = getCurrentDateTime('europe/istanbul').isocalendar()[1]
    getPreviousWeek = getCurrentWeek - 1
    apiRouteIstanbul = apiRouteIstanbul + str(
        getPreviousWeek)  # Need to change to getPreviousWeek

    apiRouteIzmir = apiRouteIzmir + str(
        getPreviousWeek)  # Need to change to getPreviousWeek
    # Get json data from apiroute call
    perser.loadJsonData(apiRouteIstanbul)
    dataIstanbul = perser.getJsonData()

    perser.loadJsonData(apiRouteIzmir)
    dataIzmir = perser.getJsonData()

    # Create file that has to be sent as notification
    file = FileIO()
    fileIstanbul = file.createCSVFile(dataIstanbul, 'istanbul')
    fileIzmir = file.createCSVFile(dataIzmir, 'izmir')

    # reading environment variables

    # environment variables are stored in host server machine.
    senderEmail = os.environ['SENDER_EMAIL']
    senderPassword = os.environ['SENDER_PASSWORD']
    # getting senderName and password

    msg = "The attached file has current week's food request list."
    # send email activities
    email = EmailActivity()
    email.message(senderEmail, "*****@*****.**", "Food request list",
                  msg)
    email.addAttachment("./files/" + fileIstanbul, fileIstanbul)
    email.addAttachment("./files/" + fileIzmir, fileIzmir)
    email.sendMail(senderEmail, senderPassword, 'smtp.gmail.com', 587)
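EmailActivity is not part of this snippet; a hedged sketch of what its message/addAttachment/sendMail steps could look like with the standard smtplib and email modules (an assumption, not the actual class):

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication

def send_with_attachment(sender, password, recipient, subject, body,
                         attachment_path, attachment_name):
    # build a multipart message: plain-text body plus one file attachment
    msg = MIMEMultipart()
    msg['From'], msg['To'], msg['Subject'] = sender, recipient, subject
    msg.attach(MIMEText(body))
    with open(attachment_path, 'rb') as f:
        part = MIMEApplication(f.read(), Name=attachment_name)
    part['Content-Disposition'] = 'attachment; filename="%s"' % attachment_name
    msg.attach(part)
    # connect to the SMTP server, upgrade to TLS and send
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        server.login(sender, password)
        server.send_message(msg)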
Example #14
    def __init__(self, fastq_r1, fastq_r2, quality_check=False):
        """ 
		the __init__ method
		
		Arguments
		----------
		fastq_r1: the path to the R1 fastq file
		fastq_r2: the path to the R2 fastq file
		quality_check: true/false if recording read information of R1 and R2
		
		"""
        self.r1 = FileIO(fastq_r1, "r")
        self.r2 = FileIO(fastq_r2, "r")
        self.r1_phred = [
            0,
        ] * 500  #500 bp, long enough for most NGSs
        self.r2_phred = [
            0,
        ] * 500
        self.r1_length = {}
        self.r2_length = {}
        #self.n = 0
        self.qc = quality_check
Example #15
def plot_all_segmentation():
    fig_size = [4, 3]
    fig = plt.figure(figsize=tuple(fig_size))
    for i in range(0, 2800, 10):
        file_name = 'test_results/' + str(i) + '_pred.obj'
        cloud, colors = FileIO.load_obj_file(file_name)
        y = np.copy(cloud[:, 1])
        cloud[:, 1] = cloud[:, 2]
        cloud[:, 2] = y
        PlotClass.subplot_color_points(cloud, '', colors, fig, 1, 1, 1,
                                       axis_off=True)
        bbox = fig.bbox_inches.from_bounds(0, 0, fig_size[0], fig_size[1])  # for single plot
        img_dir = 'images/all_segmentation'
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        plt.savefig(img_dir + '/{0:4d}.png'.format(i), bbox_inches=bbox, dpi=300)
Example #16
    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('File Converter')
        self.pack()

        self.fileIO = FileIO()
        self.fromFile = None
        self.toFile = None
        self.AC = AvailableConversions()
        self.conversionTable = self.AC.getConversionTable()

        self.fromFileType = DOT_HEX
        self.toFileType = DOT_MIF

        self.menuBar()

        self.mainFrame()
Example #17
    def __init__(self, sport_name, time_period, weight_fn_str, verbose=True):
        """
        Constructor for the BettingMachine class - initializes instances of the class and the attributes
        of the created object
        """
        self.sport_name = sport_name  # initialize the attributes of this object
        self.time_period = time_period
        self.weight_fn_str = weight_fn_str
        self.verbose = verbose

        self.ev_csv = \
            FileIO.read_csv(
                " ".join(list([
                    self.get_sport_name(),
                    EVGlobals.EV_CSV_SUFFIX,
                    "(" + ", ".join(list([str(self.get_time_period()), self.get_weight_fn_str()])) + ").csv"
                ]))
            )  # read the ev csv containing all the expected values
Example #18
    def __init__(self, sport_name, time_period, weight_fn_str, verbose=True):
        """
        Constructor for the EVCalculator class - initializes instances of the class and the attributes
        of the created object
        """
        self.sport_name = sport_name  # initialize the attributes of this object
        self.time_period = time_period
        self.weight_fn_str = weight_fn_str
        self.verbose = verbose

        self.final_ks_csv = \
            FileIO.read_csv(
                " ".join(list([
                    self.get_sport_name(),
                    WPGlobals.FINAL_KS_CSV_SUFFIX,
                    "(" + ", ".join(list([str(self.get_time_period()), self.get_weight_fn_str()])) + ").csv"
                ]))
            )  # read the ks csv containing all the spreads
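With hypothetical values for the sport, season and weight function, the filename composed above would look like this (the suffix constant is assumed):

# hypothetical values, only to show the composed filename pattern
sport_name, suffix, time_period, weight_fn_str = "NFL", "Final KS", 2018, "linear"
filename = " ".join([sport_name, suffix,
                     "(" + ", ".join([str(time_period), weight_fn_str]) + ").csv"])
print(filename)  # NFL Final KS (2018, linear).csv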
Example #19
def parse(fileName, externalFile=False):
    try:
        if externalFile:
            fio = FIO()
            fio.openFile('.csv', ftypes=[('Comma/Semicolon Delimited', '.csv'), ('All Files', '.*')], ifilen = fileName + '.csv')
            instSetFile = fio.getOpenedFile()
            fio.closeOpened()
        else:
            instSetFile = open(fileName)
        noFile = False
    except:
        print("Could not open file.")
        noFile = True

    if not noFile:
        instruction_table = dict()
        for line in instSetFile:
            instLine = line.split(';')
            instLine[2] = instLine[2].replace('\n', '')
            mnemFilter = re.findall(r'([\?\w]+\s*)([@.#.\+\w\d\s]*)\s*,*\s*([@.#.\+\w\d\s]*)', instLine[0])

            mnemFilter = mnemFilter[0]
            print("instLine", instLine)
            print("mnemFilter", mnemFilter)

            mnemonic = mnemFilter[0]
            if mnemFilter[1] != '':
                if not any(param in mnemFilter[1] for param in PARAMETER_CONSTANTS):
                    mnemonic += '%s'
                else:
                    mnemonic += mnemFilter[1]
            if mnemFilter[2] != '':
                if not any(param in mnemFilter[2] for param in PARAMETER_CONSTANTS):
                    mnemonic += ', %s'
                else:
                    mnemonic += ', ' + mnemFilter[2]

            opCode = instLine[1].replace('0x', '')
            bytes = int(instLine[2])

            instruction_table[opCode] = (mnemonic, bytes)
        
        instSetFile.close()
        return instruction_table
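A hypothetical invocation, assuming an instruction-set file whose lines look like "MOV A, #data;0x74;2" (mnemonic and operands, opcode, byte count separated by semicolons):

# hypothetical usage of parse() above
table = parse('instruction_set.csv')          # opened directly, since externalFile defaults to False
for opcode, (mnemonic, nbytes) in sorted(table.items()):
    print(opcode, mnemonic, nbytes)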
Example #20
    def __init__(self, target_csv, webdriver_path, branch_url='', read_csv=False, query_timeout=15, ntrials=5,
                 verbose=True):
        """
        Constructor for the KSSpider class - initializes instances of the class and the attributes of
        the created object
        """
        self.target_csv = target_csv  # initialize the attributes of this object
        self.webdriver_path = webdriver_path
        self.base_url = 'http://killersports.com/'
        self.branch_url = branch_url
        self.read_csv = read_csv

        # if the read_csv option is True, read the csv stored in the target_csv path
        if self.read_csv:
            self.csv_matrix = FileIO.read_csv(self.get_target_csv())
        # otherwise initialize a new csv matrix
        else:
            self.csv_matrix = list([KSGlobals.KS_CSV_HEADER, list()])

        self.query_timeout = query_timeout
        self.ntrials = ntrials
        self.verbose = verbose
Example #21
def plot_segmentation_results():
    obj_file = '_pred'
    #obj_file = '_gt'
    fig_size = [2, 8]  # width, height
    fig = plt.figure(figsize=(fig_size[0], fig_size[1]))
    idx_vec = np.array([45, 163, 263, 13, 16])  # plane, motorbike, car, chair, table
    for r in range(5):
        file_name = 'test_results/' + str(idx_vec[r]) + obj_file + '.obj'
        cloud, colors = FileIO.load_obj_file(file_name)
        np.save('images/{}.npy'.format(idx_vec[r]), cloud)
        print(cloud.shape)
        # switch z and y axis
        y = np.copy(cloud[:, 1])
        cloud[:, 1] = cloud[:, 2]
        cloud[:, 2] = y
        PlotClass.subplot_color_points(cloud, '', colors, fig, 5, 1, r + 1,
                                       axis_off=True)
    time_str = datetime.now().strftime('%Y-%m-%d-%H_%M_%S')
    bbox = fig.bbox_inches.from_bounds(0, 0, fig_size[0], fig_size[1])  # for single plot
    img_dir = 'images'
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    plt.savefig(img_dir + '/' + time_str + obj_file + '_test.png', bbox_inches=bbox)
    plt.show()
Example #22
    def read_instance(self, forward_folder):
        fileio = FileIO()
        fileio.assign_forward_folder(forward_folder)
        i = 1
        self.fhn_model_instances = fileio.read_physics_model_instance(i, 'fhn')
        self.diffusion_model_instances = fileio.read_physics_model_instance(
            i, 'diffusion')
        self.point_cloud_instances = fileio.read_point_cloud_instance(i)

        # ========================== get variable  ================================ #
        self.coord = self.point_cloud_instances['coord']
        self.no_pt = self.point_cloud_instances['no_pt']
        self.t = self.fhn_model_instances['t']
        self.V = self.fhn_model_instances['V']
        self.v = self.fhn_model_instances['v']
        self.a = self.fhn_model_instances['a']
        self.delta = self.fhn_model_instances['delta']
        self.gamma = self.fhn_model_instances['gamma']
        self.stimulated_current = np.max(
            self.fhn_model_instances['applied_current'])
        self.D = self.diffusion_model_instances['D']
        self.c = self.diffusion_model_instances['c']
        return
Example #23
    def __init__(self):
        self.hexConversionTable = {DOT_HEX: [DOT_MIF, DOT_INO, DOT_A51]}
        self.fio = FIO()
Example #24
import sys
from Processor import Processor
from FileIO import FileIO
if __name__ == '__main__':
    inFileName = sys.argv[1]
    outFileName = 'result_' + inFileName
    if len(sys.argv) == 3:
        outFileName = sys.argv[2]

    fileStream = FileIO(inFileName, outFileName)
    processor = Processor()
    DNA = fileStream.readFile(inFileName)
    aminoSeq = processor.process(DNA)
    fileStream.outFile(outFileName, aminoSeq)
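Processor.process is not included here; a hedged sketch of the kind of codon-by-codon translation it presumably performs (the codon table is truncated and the helper itself is an assumption):

# assumed translation step: read the DNA three bases at a time and map each
# codon to a one-letter amino acid code ('*' marks a stop codon)
CODON_TABLE = {'ATG': 'M', 'TGG': 'W', 'TTT': 'F', 'GGC': 'G',
               'TAA': '*', 'TAG': '*', 'TGA': '*'}

def process(dna):
    dna = dna.upper().replace('\n', '')
    return ''.join(CODON_TABLE.get(dna[i:i + 3], 'X')   # 'X' = codon not in this toy table
                   for i in range(0, len(dna) - 2, 3))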
Example #25
def main():
    # start_time=time.time()
    IM = ACIM()
    OB = Observer()
    CTRL = CTRL0()
    IM.Machine_init()
    OB.acm_init(IM)
    OB.ob_init()
    CTRL.CTRL_INIT(IM)
    sc = s_curve()

    IM_items = vars(IM)
    OB_items = vars(OB)
    CTRL_items = vars(CTRL)

    # print("{0}\n{1}\n{2}\n".format(IM_items,OB_items,CTRL_items))

    dfe = 0  # dfe for down frequency execution

    f = open(r'algorithm.dat', 'w')
    fio = FileIO()
    fio.write_header_to_file(f)

    for _ in range(NUMBER_OF_LINES):

        # Command and Load Torque */
        # cmd_fast_speed_reversal(CTRL.timebase, 5, 5, 1500); // timebase, instant, interval, rpm_cmd

        # CTRL.cmd_fast_speed_reversal(5, 5, 100, IM) # timebase, instant, interval, rpm_cmd
        sc.speed_ref(CTRL.timebase, IM)
        # ACM.Tload = 5 * sign(ACM.rpm);
        # if CTRL.timebase>= 5.00275 :
        #     print('no')

        IM.Tload = 10 * np.sign(IM.rpm)  # No-load test
        #print(IM.rpm)
        # ACM.Tload = ACM.Tem; // Blocked-rotor test

        # Simulated ACM */
        if (IM.machine_simulation(CTRL)):
            print("Break the loop.\n")
            break
        dfe += 1
        if (dfe == DOWN_FREQ_EXE):
            dfe = 0

            # Time */
            CTRL.timebase += TS

            OB.measurement(IM, CTRL)

            OB.observation(CTRL)

            fio.write_data_to_file(f, IM, CTRL)

            CTRL.control(IM.rpm_cmd, 0, OB, IM)

        IM.inverter_model(CTRL)
    f.close()
    # print("Simulation time=",time.time()-start_time)
    ACMPlot.draw_trend()
Example #26
sys.path.insert(1, '/home/sawsn/Shiernee/FHN/src/utils')
sys.path.insert(1, '/home/sawsn/Shiernee/FileIO/src/utils')

from ViewResultsUtils import ViewResultsUtils
from FileIO import FileIO
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as ss

if __name__ == '__main__':
    # case1_1D_D1_c0, case2_sphere_D1_c0, case3_2D_D1_c0,
    forward_folder = '../data/case2_2Dgrid_100/forward_6561pt/'
    START_TIME = 0
    END_TIME = 700

    fileio = FileIO()
    fileio.assign_forward_folder(forward_folder)
    i = 1
    fhn_model_instances = fileio.read_physics_model_instance(i, 'fhn')
    diffusion_model_instances = fileio.read_physics_model_instance(
        i, 'diffusion')
    point_cloud_instances = fileio.read_point_cloud_instance(i)

    # ========================== get variable  ================================ #
    coord = point_cloud_instances['coord']
    no_pt = point_cloud_instances['no_pt']
    t = fhn_model_instances['t']
    V = fhn_model_instances['V']
    v = fhn_model_instances['v']
    a = fhn_model_instances['a']
    delta = fhn_model_instances['delta']
Example #27
    path = 'data/extracted_feature'
#    path = 'data/modelnet40_ply_hdf5_2048/'
    TRAIN_FILES_VEC[i] = provider.getDataFiles( \
        os.path.join(BASE_DIR, path + '/train_files.txt'))
    TEST_FILES_VEC[i] = provider.getDataFiles(\
        os.path.join(BASE_DIR, path + '/test_files.txt'))

## data are point cloud 
#data = np.array([], dtype=np.float32).reshape(0,2048,3)
#label = np.array([], dtype=np.float32).reshape(0,1)

## data are features
data = np.array([], dtype=np.float32).reshape(0,1024) 
label = np.array([], dtype=np.float32).reshape(0)
for i in range(len(TEST_FILES_VEC[0])):
    data_temp, label_temp = FileIO.load_h5(TEST_FILES_VEC[0][i])
    data = np.concatenate((data, data_temp), axis=0)
    label = np.concatenate((label, label_temp), axis=0)

## data are point cloud 
#data = data[:,0:1024,:]
#data = data.reshape((-1,3*1024))

## data are features
data = np.concatenate([data, np.zeros((
                data.shape[0], 3*1024 - data.shape[1]))], axis=-1)
#%%
## T-SNE to reduce dimension 
tsne = TSNE(n_components=2, learning_rate=100).fit_transform(data)
#%%
# plot T-SNE
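The plotting code that followed this comment is not included in the snippet; a minimal sketch of how the 2-D embedding could be drawn and colored by class label (an assumption) is:

import matplotlib.pyplot as plt

def plot_tsne(embedding, labels, out_path='tsne_features.png'):
    # scatter the 2-D t-SNE embedding, one point per feature vector, colored by class label
    plt.figure(figsize=(8, 8))
    plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, s=3, cmap='tab20')
    plt.colorbar()
    plt.savefig(out_path, dpi=300)

# e.g. plot_tsne(tsne, label) with the arrays computed above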
Example #28
            continue
        #result = ppool.apply_async(has_similar,[c_a,clauses])
        p = Process(target = has_similar,name = c_a,args=(c_a,clauses))
        process_list.append(p)
        p.start()
        print "-->",p.pid,"start"
        #if result:
        #    similar_clauses.append(c_a)

    for p in process_list:
        p.join()
        print "-->",p.name,"join"
        calculated_clauses.append(p.name)

if __name__ == "__main__":
    fio = FileIO()
    calculated_clauses = [] 
    #try:
    calculated_clauses = fio.readFileToList(CALCULATED_FILE)
    #except Exception,e:
    #    print e
    #    pass

    clause_freq = fio.readFileToDict(CLAUSE_FREQ_FILE)
    clause_freq = dict([str(k), v] for k, v in clause_freq.iteritems() if len(str(k)) > 3)  # clause length more than 1 chinese word
    clauses = [str(k) for k in clause_freq.keys()]
    clauses = sorted(clauses, key=lambda x: len(x))  # sort clause list by clause length
    clause_num = len(clauses)
    high_freq_clauses = [str(k) for k, v in clause_freq.iteritems() if v > 2]
    low_freq_clauses = [str(k) for k, v in clause_freq.iteritems() if v <= 2]
Example #29
    def deploy(self, year, disallowed_week_indices):
        """
        Given the year or season which requires scraping and the forbidden weeks, deploys a spider that
        visits the vegasinsider.com website to acquire the betting data for the specified sport for the
        specified year and stores all relevant data to disk
        """
        # specify the different csv file names
        moneylines_csv_filename = \
            self.get_sport_name() + \
            " " + VIGlobals.MONEYLINE_CSV_SUFFIX + \
            " (" + str(year) + ").csv"

        moneylines_times_csv_filename = \
            self.get_sport_name() + \
            " " + VIGlobals.MONEYLINE_TIMES_CSV_SUFFIX + \
            " (" + str(year) + ").csv"

        spreads_csv_filename = \
            self.get_sport_name() + \
            " " + VIGlobals.SPREADS_CSV_SUFFIX + \
            " (" + str(year) + ").csv"

        spreads_times_csv_filename = \
            self.get_sport_name() + \
            " " + VIGlobals.SPREADS_TIMES_CSV_SUFFIX + \
            " (" + str(year) + ").csv"

        # This section of the code implements resume capabilities

        # if the read csv option is not enabled
        if not self.get_read_csv():
            # initialize all data matrices
            moneylines_matrix = list()
            ml_time_column = list([VIGlobals.MONEYLINE_TIMES_CSV_HEADER])

            spreads_matrix = list()
            ps_time_column = list([VIGlobals.SPREADS_TIMES_CSV_HEADER])

            final_data_matrix = list([VIGlobals.OVERALL_CSV_HEADER, list()])
        else:  # otherwise
            # load all data matrices from memory
            moneylines_matrix = self.read_csv(moneylines_csv_filename)

            ml_time_column = self.read_csv(moneylines_times_csv_filename)

            spreads_matrix = self.read_csv(spreads_csv_filename)

            ps_time_column = self.read_csv(spreads_times_csv_filename)

            final_data_matrix = self.get_csv_matrix()

        casinos = list()  # initialize an empty list to store the casino names as they appear

        week_idx = 0  # initialize the first week index
        for week in range(self.get_weeks_count()):  # for all possible week indices
            if week not in disallowed_week_indices:  # if the week index is not forbidden
                week_idx += 1  # increment the week index counter

                # construct the url for the specified season and week
                url = self.get_base_url() + self.get_branch_url() + "/week/" + str(week + 1) + "/season/" + str(year)
                driver = webdriver.Chrome(self.get_webdriver_path())  # create a Chrome webdriver
                driver.get(url)  # visit the website

                # retrieve the game tables on the website
                game_tables = driver.find_elements_by_xpath('.//td[@class="sportPicksBorder"]')
                self.display(len(game_tables))  # print the number of game tables present

                for game_table_index in range(len(game_tables)):  # for every game table
                    try:
                        # initialize the moneylines, spreads and final data lists
                        away_moneylines, home_moneylines = list([]), list([])
                        away_spreads, home_spreads = list([]), list([])
                        final_data_upper_row, final_data_lower_row = list([]), list([])

                        # create a new auxiliary Chromedriver
                        game_driver = webdriver.Chrome(self.get_webdriver_path())
                        game_driver.get(url)  # visit the website

                        # retrieve the game tables once more
                        new_game_tables = game_driver.find_elements_by_xpath('.//td[@class="sportPicksBorder"]')
                        game_table = new_game_tables[game_table_index]  # retrieve the current game table

                        # parse through the web elements to retrieve the away team and home team scores
                        away_score_row, home_score_row = game_table.find_elements_by_xpath('.//tr[@class="tanBg"]')
                        away_score = [str(web_element.text) for web_element in away_score_row.
                            find_elements_by_xpath('.//td[@class="sportPicksBorderL2 zerocenter"]')][-1]
                        home_score = [str(web_element.text) for web_element in home_score_row.
                            find_elements_by_xpath('.//td[@class="sportPicksBorderL zerocenter"]')][-1]

                        # parse through the web elements to find and click the line movement link
                        line_movement_row = game_table.find_element_by_xpath('.//tr[@class="bbg2"]')
                        line_movement_link = line_movement_row.find_element_by_xpath('.//a[@class="white"]')
                        line_movement_link.click()

                        # if the archive mode is enabled
                        if self.get_archive_mode():
                            # redirect to archive.org
                            game_driver.get('https://web.archive.org/web/' + str(game_driver.current_url))

                        # retrieve the information tables
                        info_tables = game_driver.find_element_by_xpath('.//div[@class="SLTables1"]').\
                            find_elements_by_xpath('.//table[@cellspacing=0]')

                        # retrieve the game title and datetime
                        game_title, game_datetime = info_tables[0], info_tables[1]

                        # retrieve the away team name and the home team name
                        away_team, home_team = [str(team_name) for team_name in
                                                str(game_title.find_element_by_xpath('.//font').text).split(' @ ')]

                        self.display(" ".join(list([away_team, away_score])))  # display the away team name and score
                        self.display(" ".join(list([home_team, home_score])))  # display the home team name and score

                        # parse the game date and time
                        game_date, game_time = \
                            [
                                str(game_info.text)
                                for game_info in game_datetime.find_elements_by_xpath('.//td[@valign="top"]')
                            ]

                        game_date, game_time = " ".join(game_date.split()[2:]), " ".join(game_time.split()[2:])

                        # display the game date and time
                        self.display(" ".join(list([game_date, game_time])))
                        self.display('\n')

                        # initialize empty lists to hold the underdog moneylines and point spreads
                        underdog_mls = list([])
                        underdog_pss = list([])

                        for info_table in info_tables[2:]:  # for every information table
                            # grab the casino name
                            casino_name = info_table.find_element_by_xpath('.//tr[@class="component_head"]').text
                            casino_name = casino_name[:len(casino_name) - 15]
                            self.display(casino_name)  # print the casino name

                            # if it is the first week and the first game table
                            if week_idx == 1 and game_table_index == 0:
                                casinos.append(casino_name)  # store the casino name

                            # retrieve a list of the table rows
                            table_rows = \
                                info_table\
                                    .find_element_by_xpath('.//table[@class="rt_railbox_border2"]')\
                                    .find_elements_by_xpath('.//tr')

                            # initialize the list of desired table rows for both moneylines and point spreads
                            ml_desired_table_rows = list([])
                            ps_desired_table_rows = list([])

                            # for every table row
                            for table_row in table_rows[2:]:
                                # retrieve the table row elements
                                table_row_elements = [str(table_row_element.text) for table_row_element in
                                                      table_row.find_elements_by_css_selector('td.bg2')]

                                # if the betting data was published before the start of the game and has the moneyline
                                if self.is_before_game(table_row_elements, game_date, game_time) and \
                                        self.has_money_line(
                                            table_row_elements,
                                            len(self.get_team_abbr()[away_team]),
                                            len(self.get_team_abbr()[home_team])
                                        ):
                                    # add it to the list of desired rows
                                    ml_desired_table_rows.append(table_row_elements)
                                # if the betting data was published before the start of game and has the point spread
                                if self.is_before_game(table_row_elements, game_date, game_time) and \
                                        self.has_point_spread(
                                            table_row_elements,
                                            len(self.get_team_abbr()[away_team]),
                                            len(self.get_team_abbr()[home_team])
                                        ):
                                    # add it to the list of desired rows
                                    ps_desired_table_rows.append(table_row_elements)

                            # if there are no desired rows, set the away and home moneylines to null strings
                            if len(ml_desired_table_rows) == 0:
                                away_moneyline = str('')
                                home_moneyline = str('')
                            else:  # otherwise
                                # select the most recent moneyline row
                                ml_desired_table_row = self.determine_ml_desired_table_row(ml_desired_table_rows)

                                underdog_ml = \
                                    self.determine_underdog_ml(
                                        ml_desired_table_row,
                                        away_team,
                                        home_team,
                                        self.get_team_abbr()
                                    )
                                underdog_mls.append(underdog_ml)  # check which team is the underdog

                                # display the most recent moneyline row before the game, taking the time
                                # threshold into account
                                self.display(" ".join(list(["Money Line Row:", str(ml_desired_table_row)])))

                                # extract the moneylines from this row
                                first_ml, second_ml = self.extract_moneyline(ml_desired_table_row,
                                                                             len(self.get_team_abbr()[away_team]),
                                                                             len(self.get_team_abbr()[home_team]))

                                # assign the correct moneyline to the correct team
                                if underdog_ml:
                                    away_moneyline = self.handle_moneyline_pk(first_ml)
                                    home_moneyline = self.handle_moneyline_pk(second_ml)
                                else:
                                    away_moneyline = self.handle_moneyline_pk(second_ml)
                                    home_moneyline = self.handle_moneyline_pk(first_ml)

                                if not self.is_within_time_period(ml_desired_table_row[0], ml_desired_table_row[1],
                                                                  self.rectify_date_format2(game_date),
                                                                  self.rectify_time_format(game_time)):
                                    away_moneyline = str('')
                                    home_moneyline = str('')

                                # store the moneyline times
                                ml_time_column.append(list([str(game_time), str(ml_desired_table_row[1])]))

                            # if there are no desired rows, set the away and home point spreads to null strings
                            if len(ps_desired_table_rows) == 0:
                                away_point_spread = str('')
                                home_point_spread = str('')
                            else:  # otherwise
                                # select the most recent point spread row
                                ps_desired_table_row = self.determine_ps_desired_table_row(ps_desired_table_rows)

                                underdog_ps = \
                                    self.determine_underdog_ps(
                                        ps_desired_table_row,
                                        away_team,
                                        home_team,
                                        self.get_team_abbr()
                                    )
                                underdog_pss.append(underdog_ps)  # check which team is the underdog

                                # display the most recent point spread row before the game, taking the time
                                # threshold into account
                                self.display(" ".join(list(["Point Spread Row:", str(ps_desired_table_row)])))

                                # extract the point spreads from this row
                                first_ps, second_ps = \
                                    self.extract_spreads(
                                        ps_desired_table_row,
                                        len(self.get_team_abbr()[away_team]),
                                        len(self.get_team_abbr()[home_team])
                                    )

                                # assign the correct point spread to the correct team
                                if underdog_ps:
                                    away_point_spread = self.handle_spread_pk(first_ps)
                                    home_point_spread = self.handle_spread_pk(second_ps)
                                else:
                                    away_point_spread = self.handle_spread_pk(second_ps)
                                    home_point_spread = self.handle_spread_pk(first_ps)

                                if not self.is_within_time_period(
                                        ps_desired_table_row[0],
                                        ps_desired_table_row[1],
                                        self.rectify_date_format2(game_date),
                                        self.rectify_time_format(game_time)
                                ):
                                    away_point_spread = str('')
                                    home_point_spread = str('')

                                # store the point spread times
                                ps_time_column.append(list([str(game_time), str(ps_desired_table_row[1])]))

                            # display the away and home moneylines and point spreads
                            self.display(" ".join(list(["Away Moneyline:", away_moneyline])))
                            self.display(" ".join(list(["Home Moneyline:", home_moneyline])))
                            self.display(" ".join(list(["Away Point Spread:", away_point_spread])))
                            self.display(" ".join(list(["Home Point Spread:", home_point_spread])))
                            self.display("\n")

                            # store the away and home moneylines and point spreads
                            away_moneylines.append(away_moneyline)
                            home_moneylines.append(home_moneyline)

                            away_spreads.append(away_point_spread)
                            home_spreads.append(home_point_spread)

                        # if this is the first moneyline row
                        if len(moneylines_matrix) == 0:
                            # add the moneyline heading first
                            moneylines_matrix.append(list(VIGlobals.MONEYLINE_CSV_HEADER) + casinos)
                            moneylines_matrix.append(list())

                        # if this is the first point spread row
                        if len(spreads_matrix) == 0:
                            # add the point spread heading first
                            spreads_matrix.append(list(VIGlobals.SPREADS_CSV_HEADER) + casinos)
                            spreads_matrix.append(list())

                        # print the list of away and home moneylines and point spreads
                        self.display("\n")
                        self.display(" ".join(list(["Away Moneylines:", str(away_moneylines)])))
                        self.display(" ".join(list(["Home Moneylines:", str(home_moneylines)])))
                        self.display(" ".join(list(["Away Point Spreads:", str(away_spreads)])))
                        self.display(" ".join(list(["Home Point Spreads:", str(home_spreads)])))
                        self.display("\n")

                        # determine the best moneyline for both teams
                        desired_max_away_moneyline = \
                            max([
                                float(moneyline)
                                for moneyline in away_moneylines
                                if moneyline != ''
                            ])
                        desired_max_home_moneyline = \
                            max([
                                float(moneyline)
                                for moneyline in home_moneylines
                                if moneyline != ''
                            ])

                        # determine the away and home payouts corresponding to the best moneylines
                        desired_away_moneyline_payout = self.compute_payout(desired_max_away_moneyline)
                        desired_home_moneyline_payout = self.compute_payout(desired_max_home_moneyline)

                        # determine the range of point spreads for both teams
                        desired_min_away_point_spread = \
                            min([
                                float(point_spread)
                                for point_spread in away_spreads
                                if point_spread != ''
                            ])
                        desired_max_away_point_spread = \
                            max([
                                float(point_spread)
                                for point_spread in away_spreads
                                if point_spread != ''
                            ])

                        desired_min_home_point_spread = \
                            min([
                                float(point_spread)
                                for point_spread in home_spreads
                                if point_spread != ''
                            ])
                        desired_max_home_point_spread = \
                            max([
                                float(point_spread)
                                for point_spread in home_spreads
                                if point_spread != ''
                            ])

                        # determine the underdog according to both moneylines and point spreads
                        underdog_consensus = self.determine_underdog_consensus(underdog_mls, underdog_pss)

                        # store the data appropriately depending on which team is the underdog
                        if underdog_consensus:
                            payouts_upper_row = \
                                list([self.rectify_date_format(game_date), away_team, '(Favorite)']) + away_moneylines
                            payouts_lower_row = list(['', home_team, '(Underdog)']) + home_moneylines

                            point_spreads_upper_row = \
                                list([self.rectify_date_format(game_date), away_team, '(Favorite)']) + away_spreads
                            point_spreads_lower_row = list(['', home_team, '(Underdog)']) + home_spreads

                            final_data_upper_row += \
                                list([
                                    self.rectify_date_format(game_date),
                                    away_team,
                                    '(Favorite)',
                                    away_score,
                                    str(desired_min_away_point_spread),
                                    str(desired_max_away_point_spread),
                                    str(desired_max_away_moneyline),
                                    str(desired_away_moneyline_payout)
                                ])
                            final_data_lower_row += \
                                list([
                                    '',
                                    home_team,
                                    '(Underdog)',
                                    home_score,
                                    str(desired_min_home_point_spread),
                                    str(desired_max_home_point_spread),
                                    str(desired_max_home_moneyline),
                                    str(desired_home_moneyline_payout)
                                ])
                        else:
                            payouts_upper_row = \
                                list([self.rectify_date_format(game_date), home_team, '(Favorite)']) + home_moneylines
                            payouts_lower_row = list(['', away_team, '(Underdog)']) + away_moneylines

                            point_spreads_upper_row = \
                                list([self.rectify_date_format(game_date), home_team, '(Favorite)']) + home_spreads
                            point_spreads_lower_row = list(['', away_team, '(Underdog)']) + away_spreads

                            final_data_upper_row += \
                                list([
                                    self.rectify_date_format(game_date),
                                    home_team,
                                    '(Favorite)',
                                    home_score,
                                    str(desired_min_home_point_spread),
                                    str(desired_max_home_point_spread),
                                    str(desired_max_home_moneyline),
                                    str(desired_home_moneyline_payout)
                                ])

                            final_data_lower_row += \
                                list([
                                    '',
                                    away_team,
                                    '(Underdog)',
                                    away_score,
                                    str(desired_min_away_point_spread),
                                    str(desired_max_away_point_spread),
                                    str(desired_max_away_moneyline),
                                    str(desired_away_moneyline_payout)
                                ])

                        # store the moneylines row for this game
                        moneylines_matrix.append(payouts_upper_row)
                        moneylines_matrix.append(payouts_lower_row)
                        moneylines_matrix.append(list())

                        # store the point spreads row for this game
                        spreads_matrix.append(point_spreads_upper_row)
                        spreads_matrix.append(point_spreads_lower_row)
                        spreads_matrix.append(list())

                        # store the overall data row for this game
                        final_data_matrix.append(final_data_upper_row)
                        final_data_matrix.append(final_data_lower_row)
                        final_data_matrix.append(list())

                        # close the auxiliary driver
                        game_driver.close()

                    except Exception as e:  # if an exception occurs
                        self.display("Something bad has happened!\n")  # print an error message
                        traceback.print_exc()  # print traceback
                        game_driver.close()  # close the auxiliary driver and retry

                driver.close()  # close the main Chromedriver

                # set the current csv matrix as the final data matrix
                self.set_csv_matrix(final_data_matrix)

                # write all relevant data to memory as csv files
                FileIO.write_csv(moneylines_csv_filename, moneylines_matrix)
                FileIO.write_csv(moneylines_times_csv_filename, ml_time_column)

                FileIO.write_csv(spreads_csv_filename, spreads_matrix)
                FileIO.write_csv(spreads_times_csv_filename, ps_time_column)

                FileIO.write_csv(self.get_target_csv(), self.get_csv_matrix())
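compute_payout is used above but defined elsewhere in the class; the usual conversion from American moneyline odds to profit per unit staked (an assumption about what that helper does) is:

def compute_payout(moneyline):
    # assumed American-odds conversion: +150 pays 1.5 units of profit per unit
    # staked, -200 pays 0.5 units
    moneyline = float(moneyline)
    if moneyline > 0:
        return moneyline / 100.0
    return 100.0 / abs(moneyline)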
Example #30
    def deploy(self, data_matrix, start_game_index):
        """
        Given the data matrix of games and the starting game index, deploys a spider that visits the
        killersports.com website to acquire the win percentage for each of the two teams playing each
        match and stores all relevant data to disk
        """
        new_data_matrix = self.get_csv_matrix()  # get the current csv matrix

        # for every game
        for game_index in range(start_game_index, len(data_matrix), 3):
            dates = self.extract_dates(new_data_matrix)  # get a list of past dates

            # if the current game was not held in a past date
            if data_matrix[game_index][0] not in dates or game_index == start_game_index:
                fav_spreads_map = dict({})  # reinitialize the data structures
                dog_spreads_map = dict({})

            self.display(dates)  # display the dates and game index
            self.display(game_index)

            fav_row = list(data_matrix[game_index])  # extract the favorite, underdog and spacer row
            dog_row = list(data_matrix[game_index + 1])
            spacer_row = list(data_matrix[game_index + 2])

            current_date = fav_row[0]  # get the date for the current game

            self.display(current_date)
            self.display(fav_row)
            self.display('\n')

            # for every spread in the favorite row
            for spread_index in range(3, len(fav_row)):
                if len(fav_row[spread_index]) > 0:  # if the point spread is not a null string
                    self.display('Favorite')
                    current_spread = float(fav_row[spread_index])  # get the current spread
                    if current_spread not in fav_spreads_map.keys():  # if the win to loss stats are not available
                        driver = webdriver.Chrome(self.get_webdriver_path())  # create a Chrome webdriver
                        url = self.get_base_url() + self.get_branch_url()  # construct the url
                        for trial_idx in range(self.get_ntrials()):  # try the specified number of times
                            try:
                                driver.get(url)  # visit the killersports website

                                sleep(self.get_query_timeout())  # sleep for the specified number of seconds

                                # build the search query
                                query = self.create_exact_query(current_date, current_spread)

                                self.display(query)

                                # find the query box and type in the query
                                text_field = driver.find_element_by_id('sdql')
                                text_field.send_keys(query)

                                # find the query submit button and click it
                                button = driver.find_element_by_name('submit')
                                button.click()

                                # retrieve the win-loss-tie stats for the given point spread
                                su_element = driver.find_element_by_xpath\
                                    ('/html/body/div[@id="content"]/table/tbody/tr[2]/td/table/tbody/tr[1]/td[1]')

                                su_text = str(su_element.text)  # convert the web element to text
                                # parse the text to extract the win-loss-tie stats
                                wlt = [int(num) for num in su_text.split()[0].split('-')]

                                driver.close()  # close the Chrome webdriver

                                sample_size = wlt[0] + wlt[1]  # compute the sample size

                                # append the win, loss and sample-size counts for the current spread
                                # to the favorite row
                                fav_row[spread_index] = fav_row[spread_index] + ';' + str(wlt[0]) + '-' + str(
                                    wlt[1]) + '-' + str(sample_size)

                                # store the win-loss-tie stats for the current spread for both the favorite
                                # and the underdog teams
                                fav_spreads_map[current_spread] = tuple((int(wlt[0]), int(wlt[1])))
                                dog_spreads_map[-current_spread] = tuple((int(wlt[1]), int(wlt[0])))

                                break  # no more trials are required

                            except Exception:  # in the case of an Exception
                                self.display('\n')
                                # self.display(fav_row)
                                self.display(traceback.format_exc())  # display the traceback text
                                self.display('Exception encountered: No games of desired spread found in sample!')
                                self.display('Setting data to null and exiting...')
                                self.display('\n')

                                # if all trials have been exhausted
                                if trial_idx == self.get_ntrials() - 1:
                                    # store the win-loss-tie stats as 0-0-0
                                    fav_row[spread_index] = fav_row[spread_index] + ';' + str('0-0-0')
                                    # dog_row[spread_index] = dog_row[spread_index] + ';' + str('0-0-0')
                                    fav_spreads_map[current_spread] = tuple((0, 0))
                                    dog_spreads_map[-current_spread] = tuple((0, 0))
                                    driver.close()  # close the Chrome webdriver
                    else:  # if the current spread stats have already been scraped
                        # use the stored win-loss-tie stats for current spread
                        fav_win_count, fav_loss_count = fav_spreads_map[current_spread]
                        sample_size = fav_win_count + fav_loss_count
                        fav_row[spread_index] = fav_row[spread_index] + ';' + str(fav_win_count) + '-' + \
                                                str(fav_loss_count) + '-' + str(sample_size)
                        # dog_row[spread_index] = dog_row[spread_index] + ';' + str(fav_loss_count) + '-' + \
                        #                         str(fav_win_count) + '-' + str(sample_size)

                self.display(fav_row)
                self.display(dog_row)
                self.display(spacer_row)
                self.pdisplay(fav_spreads_map)
                self.pdisplay(dog_spreads_map)
                self.display('\n')

            # for every spread in the underdog row
            for spread_index in range(3, len(dog_row)):
                if len(dog_row[spread_index]) > 0:  # if the point spread is not a null string
                    self.display('Underdog')
                    current_spread = float(dog_row[spread_index])  # get the current spread
                    if current_spread not in dog_spreads_map.keys():  # if the win to loss stats are not available
                        driver = webdriver.Chrome(self.get_webdriver_path())  # create a Chrome webdriver
                        url = self.get_base_url() + self.get_branch_url()  # construct the url
                        for trial_idx in range(self.get_ntrials()):  # try the specified number of times
                            try:
                                driver.get(url)  # visit the killersports website

                                sleep(self.get_query_timeout())  # sleep for the specified number of seconds

                                # build the search query
                                query = self.create_exact_query(current_date, current_spread)

                                self.display(query)

                                # find the query box and type in the query
                                text_field = driver.find_element_by_id('sdql')
                                text_field.send_keys(query)

                                # find the query submit button and click it
                                button = driver.find_element_by_name('submit')
                                button.click()

                                # retrieve the win-loss-tie stats for the given point spread
                                su_element = driver.find_element_by_xpath \
                                    ('/html/body/div[@id="content"]/table/tbody/tr[2]/td/table/tbody/tr[1]/td[1]')

                                su_text = str(su_element.text)  # convert the web element to text
                                # parse the text to extract the win-loss-tie stats
                                wlt = [int(num) for num in su_text.split()[0].split('-')]

                                driver.close()  # close the Chrome webdriver

                                sample_size = wlt[0] + wlt[1]  # compute the sample size

                                # fav_row[spread_index] = fav_row[spread_index] + ';' + str(wlt[0]) + '-' +
                                # str(wlt[1]) + '-' + str(sample_size)

                                # append the win, loss and sample-size counts for the current spread
                                # to the underdog row
                                dog_row[spread_index] = dog_row[spread_index] + ';' + str(wlt[0]) + '-' + str(
                                    wlt[1]) + '-' + str(sample_size)

                                # store the win-loss-tie stats for the current spread for both the favorite
                                # and the underdog teams
                                fav_spreads_map[-current_spread] = tuple((int(wlt[1]), int(wlt[0])))
                                dog_spreads_map[current_spread] = tuple((int(wlt[0]), int(wlt[1])))

                                break  # no more trials are required

                            except Exception:  # in the case of an Exception
                                self.display('\n')
                                # self.display(dog_row)
                                self.display(traceback.format_exc())  # display the traceback text
                                self.display('Exception encountered: No games of desired spread found in sample!')
                                self.display('Setting data to null and exiting...')
                                self.display('\n')

                                # if all trials have been exhausted
                                if trial_idx == self.get_ntrials() - 1:
                                    # fav_row[spread_index] = fav_row[spread_index] + ';' + str('0-0-0')

                                    # store the win-loss-tie stats as 0-0-0
                                    dog_row[spread_index] = dog_row[spread_index] + ';' + str('0-0-0')
                                    fav_spreads_map[-current_spread] = tuple((0, 0))
                                    dog_spreads_map[current_spread] = tuple((0, 0))
                                    driver.close()  # close the Chrome webdriver
                    else:  # if the current spread stats have already been scraped
                        # use the stored win-loss-tie stats for current spread
                        dog_win_count, dog_loss_count = dog_spreads_map[current_spread]
                        sample_size = dog_win_count + dog_loss_count
                        # fav_row[spread_index] = fav_row[spread_index] + ';' + str(fav_win_count) + '-' + \
                        #                         str(fav_loss_count) + '-' + str(sample_size)
                        dog_row[spread_index] = dog_row[spread_index] + ';' + str(dog_win_count) + '-' + \
                                                str(dog_loss_count) + '-' + str(sample_size)

                self.display(fav_row)
                self.display(dog_row)
                self.display(spacer_row)
                self.pdisplay(fav_spreads_map)
                self.pdisplay(dog_spreads_map)
                self.display('\n')

            new_data_matrix.append(fav_row)  # store the modified favorite, underdog and spacer rows
            new_data_matrix.append(dog_row)
            new_data_matrix.append(spacer_row)

            self.set_csv_matrix(new_data_matrix)  # set the new csv matrix as the current csv matrix
            FileIO.write_csv(self.get_csv_matrix(), self.get_target_csv())  # store the current csv matrix
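
    # illustrative sketch (assumption, not in the original listing): deploy()
    # relies on an extract_dates helper; a minimal version consistent with its
    # usage above (a game's date sits in column 0 of each stored row) could be:
    def extract_dates(self, data_matrix):
        """Return the dates of all games already stored in the csv matrix."""
        return [row[0] for row in data_matrix if row and row[0]]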
Example #31
0
if __name__ == '__main__':
    from PhotoDao import PhotoDao
    from DBHelper import DBHelper
    db_helper = DBHelper()
    root = '/nas02/home/h/o/hongtao/Iconic'
    db_helper.init(root)

    query = 'love'
    photo_dao = PhotoDao(db_helper)
    photo_ids = photo_dao.getClassPhotoIds(query, ''.join([query]))
#    photo_ids = photo_ids[0:100]
#    photos = photo_dao.getPhotos(query, photo_ids)

    photos = getPhotosMultiThread(root, query, photo_ids)
    print('obtained ' + str(len(photos)) + ' photos.')

    top_tagfile = '/nas02/home/h/o/hongtao/magiconic/FlickrDownloader/tmp_dir/data/tags/%s.txt' % query
    top_tags = []  # fix: top_tags was previously used without being initialized
    with open(top_tagfile, 'r') as fin:  # with-block ensures the file is closed
        for tag in fin:
            top_tags.append(tag.strip())
    photos = filter_photos_with_top_tags(photos)
    print('after filtering by top tags, ' + str(len(photos)) + ' images left.')
    new_photo_ids = []
    for photo in photos:
        new_photo_ids.append(photo.photoId)
    output_filepath = './tmp_%s_top_1000_label.txt' % query
    from FileIO import FileIO
    file_io = FileIO()
    file_io.write_strings_to_file(new_photo_ids, output_filepath)
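
    # illustrative sketch (assumption, not in the original listing):
    # getPhotosMultiThread and filter_photos_with_top_tags are assumed to be
    # defined earlier in the enclosing module; a filter consistent with its use
    # above, assuming each photo exposes an iterable of tag strings, might be:
    #
    #     def filter_photos_with_top_tags(photos):
    #         return [photo for photo in photos
    #                 if any(tag in top_tags for tag in photo.tags)]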
Example #32
0
class HexClass(object):

    def __init__(self):
        self.hexConversionTable = {DOT_HEX : [DOT_MIF, DOT_INO, DOT_A51]}
        self.fio = FIO()

    def fetchMifParams(self):
        mifTop = Toplevel()
        self.mifApp = MifUI(mifTop)
        self.mifApp.mainloop()
        mifParams = self.mifApp.getParameters()
        mifTop.destroy()
        self.depth = int(mifParams[0])
        self.width = int(mifParams[1])
        self.address_radix = int(mifParams[2])
        self.data_radix = int(mifParams[3])
        self.fillZeros = int(mifParams[4])

    def convert(self, toFileType):
        self.fio.openFile(exten=DOT_HEX, ftypes=[('Hex files', DOT_HEX), ('all files', DOT_ALL)])
        fromFile = self.fio.getOpenedFile()
        self.phf = ParseHexFile(fromFile)
        self.fio.closeOpened()

        self.fio.saveFile(exten=toFileType, ftypes=[(toFileType.strip('.') + ' files', toFileType),
                                    ('all files', DOT_ALL)], ifilen='myfile' + toFileType)
        toFile = self.fio.getSavedFile()


        if toFileType == DOT_MIF:
            try:
                self.hexToMif(toFile)
            except Exception:  # avoid a bare except; report the failure
                self.fio.errorPopup('Something\'s wrong with the Mif parameters!')
        elif toFileType == DOT_INO:
            self.hexToIno(toFile)
        elif toFileType == DOT_A51:
            self.hexToA51(toFile)

        self.fio.closeSaved()

    def hexToMif(self, mifFile):
        self.fetchMifParams()
        self.phf.setByteWidth(self.width)
        mifFile.write('DEPTH = {};\nWIDTH = {};\n'.format(str(self.depth),str(self.width)) +
                           'ADDRESS_RADIX = {};\n'.format(str(self.address_radix)) +
                           'DATA_RADIX = {};\n'.format((self.data_radix)) +
                           'CONTENT\nBEGIN\n')

        mifLineCount = 0
        for i in range(len(self.phf.hexLen)):
            tempAddress = []
            for j in range(int(self.phf.hexLen[i],self.data_radix)):
                addr = int(self.phf.hexAddr[i],self.data_radix)+j
                tempAddress.append(addr)
                if addr > mifLineCount:
                    mifLineCount = addr
            for k in range(len(tempAddress)):
                mifFile.write(str(hex(tempAddress[k])).replace('0x','') +
                                   '\t:\t' + str(self.phf.hexData[i][k]) + ';\n')
        # Fill in the rest of the addresses with 00
        if mifLineCount < int(self.depth) and self.fillZeros == 1:
            for i in range(int(self.depth) - mifLineCount):
                mifFile.write(str(hex(mifLineCount + i + 1)).replace('0x','') +
                                   '\t:\t' + '00' + ';\n')
        mifFile.write('END;')
        mifFile.close()

    def hexToIno(self, inoFile, dataArrFormat=None, printArr=False):
        # fmt avoids shadowing the built-in format()
        fmt = ['dpl', 'dph', 'ndb', 'dat'] if dataArrFormat is None else dataArrFormat
        orgHexData = self.phf.structureHexContents(fmt)
        arrayStr, arrLen = lst2d2str(orgHexData, pad=False)

        inoFile.write(arduinoCode(arrLen, arrayStr, str(fmt) + " (repeated for however many hex lines were made)"))

        if printArr:
            print(arrayStr, "\n" + str(fmt) + " (repeated for however many hex lines were made)")

        inoFile.close()

    def hexToA51(self, a51File):
        try:
            with open("instruction_table.p", 'rb') as table_file:
                instruction_table = pickle.load(table_file)
        except Exception:
            print("No instruction_table file found.")
            return

        hexLetters = ['A', 'B', 'C', 'D', 'E', 'F']
        csegs = self.phf.getHexAddr()
        dataLines = self.phf.getHexData()
        for i, memLoc in enumerate(csegs):
            if i == 0:
                a51File.write('\n\ncseg ' + str(memLoc) + '\n')
            # start a new cseg when this record's address does not continue
            # from the previous one (16 bytes per hex record)
            elif int(str(memLoc), 16) - 16 != int(str(csegs[i - 1]), 16):
                a51File.write('\n\ncseg ' + str(memLoc) + '\n')

            paramCnt = 0
            paramWrtn = 0
            params = []
            cmd = ''
            cmdDone = True
            currInst = None
            for j, dat in enumerate(dataLines[i]):
                if cmdDone:
                    currInst = instruction_table[dat]
                    cmd = currInst[0]
                    paramCnt = currInst[1] - 1
                    print("param Count", paramCnt)
                    cmdDone = False

                if paramCnt == 0:
                    if len(params) == 1:
                        for hl in hexLetters:
                            lett = params[0].find(hl)
                            if lett == 0:
                                params[0] = '0' + params[0]
                        cmd = cmd % str(params[0])
                        print(cmd)
                        a51File.write(cmd + '\n')
                    elif len(params) == 2:
                        for p in range(2):
                            for hl in hexLetters:
                                lett = params[p].find(hl)
                                if lett == 0:
                                    params[p] = '0' + params[p]

                        cmd = cmd %(str(params[0]), str(params[1]))
                        print(cmd)
                        a51File.write(cmd + '\n')
                    else:
                        a51File.write(cmd + '\n')
                    cmdDone = True
                    params = []

                else:
                    cmdDone = False
                    params.append(dat)
                    paramCnt -= 1

        a51File.close()
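

# illustrative usage sketch (assumption, not in the original listing): the
# DOT_* constants, FIO and ParseHexFile come from this listing's enclosing
# module, and convert() drives the whole flow:
if __name__ == '__main__':
    converter = HexClass()
    converter.convert(DOT_MIF)  # prompts for a .hex file and writes a .mif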
Example #33
0
File: pileup.py Project: mtstamp/stamp
def generate_pileup_file(sample_name, outpath, gzip_pileup, alignment_file, alignment_summary, probe_file, mtdna_refseq, qual_min, mtdna_offset):
	"""
	a function wrapper to generate the pileup file from the alignment file using samtools
	mtdna positions are corrected to those of rCRS
	
	Arguments
	----------
	sample_name: the name of the sample
	outpath: the path to store the output files
	gzip_pileup: compress the output file
	alignment_file: the processed alignment file returned from filter_alignment_file(...)
	alignment_summary: a dict of read summary returned from filter_alignment_file(...)
	probe_file: the path to the probe file
	mtdna_refseq: the mtdna reference sequence
	qual_min: the minimum quality score to output (used in samtools mpileup -Q )
	mtdna_offset: the position offset used to parse mtdna sites.
	
	Returns
	----------
	None
	
	Outputs
	----------
	${output}/${sample_name}.mtdna.consensus.adj.pileup(.gz): the resulting pileup file
	${output}/${sample_name}.coverage: the read coverage information for all mtdna sites (tsv file)  
	
	"""
	
	#parse amplicon information
	mtdna_len = 16569
	read_len = 250
	
	#amplicon information for each position in mtdna
	amp_info = [[] for i in xrange(mtdna_len+mtdna_offset+1)] #amplicon covered at each position
	amp_cov = [0,]*(mtdna_len+mtdna_offset+1) #amplicons sequencing depth at each position
	amp_r1_pos = [mtdna_len,]*(mtdna_len+mtdna_offset+1) #relative position at read 1
	amp_r2_pos = [mtdna_len,]*(mtdna_len+mtdna_offset+1) #relative position at read 2
	amp_r1_probe = [mtdna_len,]*(mtdna_len+mtdna_offset+1) #relative position to the r1 probe
	amp_r2_probe = [mtdna_len,]*(mtdna_len+mtdna_offset+1) #relative position to the r2 probe
	
	#parse the probe file for amplicon information
	with open(probe_file, "r") as fh:
		for line in fh:
			line = line.rstrip("\r\n")
			if (not line):
				continue
			name, chr, start, end, s1, s2, r1_probe, r2_probe, blen = line.split("\t")
			start = int(start)
			end = int(end)
			#length of r1 and r2 probes
			if (s1 == "+"):
				p1 = len(r1_probe.strip())
				p2 = len(r2_probe.strip())
			else:
				p1 = len(r2_probe.strip())
				p2 = len(r1_probe.strip())
			#barcode length
			blen = int(blen)
			if (chr == "chrM"):
				#number of amplicons in the QC+ bam file
				cov = alignment_summary.get(name, 0)
				amp = []
				if (start < 0):
					#split the amplicon into halves in the D-loop region
					amp.append([mtdna_len+start, mtdna_len, s1, p1, 0])
					amp.append([1, end, s1, 0, p2])
				else:
					amp.append([start, end, s1, p1, p2])
				#positions in r1 and r2 reads
				#positions in probe
				for start, end, s1, p1, p2 in amp:
					for i in range(start+p1, end-p2+1):
						amp_info[i].append("%s(%s)"%(name,s1))
						amp_cov[i] += cov
					if (s1 == "+"):
						for i in xrange(p1):
							amp_r1_probe[i+start] = p1-i #position in R1 probe
						for i in xrange(start+p1, end-p2+1):
							amp_r1_pos[i] = min(amp_r1_pos[i], i-start) #position in R1
							amp_r2_pos[i] = min(amp_r2_pos[i], end+1-i+blen) #position in R2
						for i in xrange(p2): #position in R2 probe
							amp_r2_probe[end-i] = p2-i
					else:
						for i in xrange(p1):
							amp_r2_probe[i+start] = p1-i
						for i in xrange(start+p1, end-p2+1):
							amp_r2_pos[i] = min(amp_r2_pos[i], i-start+blen)
							amp_r1_pos[i] = min(amp_r1_pos[i], end+1-i)
						for i in xrange(p2):
							amp_r1_probe[end-i] = p2-i
	
	if (alignment_file.endswith(".bam")):
		alignment_file = alignment_file[:-4]
	
	#sort reads according to the aligned mtDNA positions
	#execute("%s sort -o %s.sorted.bam %s.bam " % (samtools, alignment_file, alignment_file))
	
	#pileup reads using samtools
	#mapq >= 20 & baseq >= qual_min
	#pf = pipe_output("%s mpileup -q 20 -Q %d -B -d 500000 -f %s %s.sorted.bam" % (samtools, qual_min, mtdna_refseq, alignment_file))
	pf = pipe_output("%s mpileup -q 20 -Q %d -B -d 500000 -f %s %s.bam" % (samtools, qual_min, mtdna_refseq, alignment_file))
	
	#summarize site coverage
	out_coverage = open(outpath + os.path.sep + sample_name + ".coverage", "w")
	head = ["chr", "pos", "pos.adj", "ref", "depth", "Q0", "Q1", "Q2", "Q3", "Q4", "amps", "amp.r1.pos","amp.r2.pos","amp.r1.probe","amp.r2.probe","amp.info"]
	out_coverage.write("\t".join(head) + "\n")
	
	#trim and move the shifted reads to the correct rCRS positions
	out_name =  outpath + os.path.sep + sample_name + ".mtdna.consensus.adj.pileup"
	if (gzip_pileup):
		out_name += ".gz"
	out_pileup = FileIO(out_name, "w", compresslevel=3)
	
	#temporarily store amplicons mapped to the end of the shifted mtDNA (the last mtdna_offset bps)
	tmp_line = {}
	
	#iterate reads in the pileup file generated
	for line in pf.stdout:
		line = line.rstrip("\r\n")
		if (not line):
			continue
		chr, pos, ref, depth, r, q = line.split("\t")
		qual = [0,0,0,0,0]
		#group quals into <10, 10-20, 20-30, 30-40, >40
		for i in phred(q):
			i = int(i)/10
			if (i >= 4):
				i = 4
			qual[i] += 1
		pos = int(pos)
		depth = int(depth)
		pos_adj = pos - mtdna_offset
		if (pos_adj > 0):
			l = tmp_line.get(pos_adj)
			if (l):
				chr1, ref1, depth1, qual1, r1, q1 = l
				assert ref1 == ref, "the reference allele does not match at position %d" % pos_adj
				#pileup reads if they aligned to the same positions
				depth = int(depth) + int(depth1)
				#concatenate reads and read qualities 
				r += r1
				q += q1
				#sum up quality stats
				qual = [i+j for i,j in zip(qual, qual1)]
				#delete temp records for the position
				del tmp_line[pos_adj]
			#output coverage and quality stats
			out_coverage.write("\t".join(map(str, [chr, pos, pos_adj, ref, depth] + qual + [amp_cov[pos_adj], amp_r1_pos[pos_adj], amp_r2_pos[pos_adj], amp_r1_probe[pos_adj], amp_r2_probe[pos_adj],"|".join(amp_info[pos_adj])]))+"\n")
			#output reads
			out_pileup.write("\t".join([chr, str(pos_adj), ref, str(depth), r, q])+"\n")
		else:
			#temporarily store reads aligned to the last mtdna_offset bps
			pos = mtdna_len + pos_adj
			tmp_line[pos] = [chr, ref, depth, qual, r, q]
	if (tmp_line):
		#output reads aligned to the last mtdna_offset bps
		for pos_adj in sorted(tmp_line.keys()):
			chr, ref, depth, qual, r, q = tmp_line[pos_adj]
			out_coverage.write("\t".join(map(str, [chr, pos_adj-mtdna_len, pos_adj, ref, depth] + qual + [amp_cov[pos_adj], amp_r1_pos[pos_adj], amp_r2_pos[pos_adj], amp_r1_probe[pos_adj], amp_r2_probe[pos_adj],"|".join(amp_info[pos_adj])]))+"\n")
			out_pileup.write("\t".join([chr, str(pos_adj), ref, str(depth), r, q])+"\n")
	
	#close file handles
	pf.stdout.close()
	out_pileup.close()
	out_coverage.close()
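
#illustrative sketch (assumption, not in the original listing): the phred()
#helper used above is expected to decode a samtools base-quality string with
#the standard Phred+33 ASCII offset, e.g.
def phred(quality_string):
	#assumed implementation of the helper referenced above
	return [ord(char) - 33 for char in quality_string]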
Example #34
0
class MifUI(Frame):
    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('MIF file Hex Parameters')
        self.pack()

        self.parameters = [
            'depth', 'width', 'address_radix', 'data_radix', 'zero_fill'
        ]

        self.fio = FIO()

        self.mainFrame()

    def mainFrame(self):

        depthFrame = Frame(self)
        depthValue = StringVar()
        depthLabel = Label(depthFrame, text='Mem Depth')
        depthLabel.pack(side='left', fill=BOTH)
        self.depthEntry = Entry(depthFrame, textvariable=depthValue)
        self.depthEntry.pack(side='right', fill=BOTH)
        depthFrame.pack(fill=BOTH)

        widthFrame = Frame(self)
        widthValue = StringVar()
        widthLabel = Label(widthFrame, text='Mem width')
        widthLabel.pack(side='left')
        self.widthEntry = Entry(widthFrame, textvariable=widthValue)
        self.widthEntry.pack(side='right', fill=BOTH)
        widthFrame.pack(fill=BOTH)

        addRadFrame = Frame(self)
        addRadValue = StringVar()
        addRadLabel = Label(addRadFrame, text='Mem address Radix')
        addRadLabel.pack(side='left')
        self.addRadEntry = Entry(addRadFrame, textvariable=addRadValue)
        self.addRadEntry.pack(side='right', fill=BOTH)
        addRadFrame.pack(fill=BOTH)

        datRadFrame = Frame(self)
        datRadValue = StringVar()
        datRadLabel = Label(datRadFrame, text='Mem data Radix')
        datRadLabel.pack(side='left')
        self.datRadEntry = Entry(datRadFrame, textvariable=datRadValue)
        self.datRadEntry.pack(side='right', fill=BOTH)
        datRadFrame.pack(fill=BOTH)

        zeroFillFrame = Frame(self)
        self.zeroFillValue = IntVar()
        self.zeroFillCB = Checkbutton(zeroFillFrame, text='Pad trailing zeros?', variable=self.zeroFillValue, \
                                 onvalue = 1, offvalue = 0)
        self.zeroFillCB.pack()
        zeroFillFrame.pack(fill=BOTH)

        setParamButton = Button(self,
                                text='Set Parameters',
                                command=self.getFieldEntries)
        setParamButton.pack()

    def getFieldEntries(self):
        try:
            self.parameters[0] = int(self.depthEntry.get())
            self.parameters[1] = int(self.widthEntry.get())
            self.parameters[2] = self.addRadEntry.get()
            self.parameters[3] = self.datRadEntry.get()
            self.parameters[4] = self.zeroFillValue.get()
            self.quit()
        except:
            self.fio.errorPopup('Depth and Width should be integers!')

    def getParameters(self):
        return self.parameters
Example #35
0
from FileIO import FileIO
from RepoGlobals import RepoGlobals
from NBA.NBAGlobals import NBAGlobals
from KillerSportsSpider import KSSpider
from KillerSportsSpider import KSGlobals
from VegasInsiderSpider import VIGlobals

START_GAME_INDEX = 0  # specify the starting game index

# read the spreads data matrix
DATA_MATRIX = FileIO.read_csv(" ".join(
    [NBAGlobals.SPORT_NAME, VIGlobals.SPREADS_CSV_SUFFIX, "(Aggregate).csv"]))

# initialize the target csv
TARGET_CSV = " ".join(
    [NBAGlobals.SPORT_NAME, KSGlobals.KS_CSV_SUFFIX, "(Aggregate).csv"])

nba_spider = \
    KSSpider(
        target_csv=TARGET_CSV,
        webdriver_path=RepoGlobals.WEBDRIVER_PATH,
        branch_url=NBAGlobals.KS_BRANCH_URL,
        read_csv=False,
        query_timeout=15,
        ntrials=5,
        verbose=True
    )  # initialize the nba ks spider

nba_spider.deploy(DATA_MATRIX, START_GAME_INDEX)  # deploy the nba ks spider
Example #36
0
class MifUI(Frame):

    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('MIF file Hex Parameters')
        self.pack()

        self.parameters = ['depth','width','address_radix','data_radix', 'zero_fill']

        self.fio = FIO()

        self.mainFrame()

    def mainFrame(self):

        depthFrame = Frame(self)
        depthValue = StringVar()
        depthLabel = Label(depthFrame, text = 'Mem Depth')
        depthLabel.pack(side = 'left',fill=BOTH)
        self.depthEntry = Entry(depthFrame, textvariable = depthValue)
        self.depthEntry.pack(side = 'right',fill=BOTH)
        depthFrame.pack(fill=BOTH)

        widthFrame = Frame(self)
        widthValue = StringVar()
        widthLabel = Label(widthFrame, text = 'Mem width')
        widthLabel.pack(side = 'left')
        self.widthEntry = Entry(widthFrame, textvariable = widthValue)
        self.widthEntry.pack(side = 'right',fill=BOTH)
        widthFrame.pack(fill=BOTH)

        addRadFrame = Frame(self)
        addRadValue = StringVar()
        addRadLabel = Label(addRadFrame, text = 'Mem address Radix')
        addRadLabel.pack(side = 'left')
        self.addRadEntry = Entry(addRadFrame, textvariable = addRadValue)
        self.addRadEntry.pack(side = 'right',fill=BOTH)
        addRadFrame.pack(fill=BOTH)

        datRadFrame = Frame(self)
        datRadValue = StringVar()
        datRadLabel = Label(datRadFrame, text = 'Mem data Radix')
        datRadLabel.pack(side = 'left')
        self.datRadEntry = Entry(datRadFrame, textvariable = datRadValue)
        self.datRadEntry.pack(side = 'right',fill=BOTH)
        datRadFrame.pack(fill=BOTH)

        zeroFillFrame = Frame(self)
        self.zeroFillValue = IntVar()
        self.zeroFillCB = Checkbutton(zeroFillFrame, text='Pad trailing zeros?', variable=self.zeroFillValue, \
                                 onvalue = 1, offvalue = 0)
        self.zeroFillCB.pack()
        zeroFillFrame.pack(fill=BOTH)

        setParamButton = Button(self, text = 'Set Parameters', command = self.getFieldEntries)
        setParamButton.pack()

    def getFieldEntries(self):
        try:
            self.parameters[0] = int(self.depthEntry.get())
            self.parameters[1] = int(self.widthEntry.get())
            self.parameters[2] = self.addRadEntry.get()
            self.parameters[3] = self.datRadEntry.get()
            self.parameters[4] = self.zeroFillValue.get()
            self.quit()
        except:
            self.fio.errorPopup('Depth and Width should be integers!')

    def getParameters(self):
        return self.parameters

Example #37
0
# note: the enclosing function header was lost in this listing; the import and
# the function name below are assumed so the fragment runs on its own
import numpy as np


def report_prediction_error(predicted, true_value):
    print('mean \u00B1std: {} \u00B1 {}'.format(predicted.mean(),
                                                predicted.std()))
    print('error(%): {} \u00B1 {}'.format(
        (abs(predicted - true_value) / true_value).mean() * 100,
        (abs(predicted - true_value) / true_value).std() * 100))
    print('quartile error(%): {} \u00B1 {}'.format(
        np.quantile((abs(predicted - true_value) / true_value), 0.25) * 100,
        np.quantile((abs(predicted - true_value) / true_value), 0.75) * 100))


if __name__ == '__main__':
    # case1_1D_D1_c0, case2_sphere_D1_c0, case3_2D_D1_c0,
    forward_folder = '../data/case3_sphere/forward1/'
    inverse_folder = '../data/case3_sphere/inverse1/'

    fileio = FileIO()
    fileio.assign_forward_folder(forward_folder)
    fileio.assign_inverse_folder(inverse_folder)
    i = 1

    fhn_model_instances = fileio.read_physics_model_instance(i, model='fhn')
    fhn_dl_model_instances = fileio.read_inverse_physics_model_instance(
        i, model='fhn')
    diffusion_model_instances = fileio.read_physics_model_instance(
        i, model='diffusion')
    diffusion_dl_model_instances = fileio.read_inverse_physics_model_instance(
        i, model='diffusion')
    point_cloud_instances = fileio.read_point_cloud_instance(i)

    coord = point_cloud_instances['coord']
    t = fhn_model_instances['t']
Example #38
0
    def compute_win_percentages(self, weight_function, coefficients,
                                weight_fn_str):
        """
        Given a weight function type (identity, polynomial or exponential), the coefficients of the weight
        function and the string representation of the weight function, computes the win percentages of all
        games in the overall csv and writes the resulting csv to memory
        """
        target_csv_name = " ".join([
            self.get_sport_name(),
            WPGlobals.FINAL_KS_CSV_SUFFIX,
            "(" + ", ".join([str(self.get_time_period()), weight_fn_str]) + ").csv"
        ])  # construct the name of the target csv

        overall_csv = self.get_overall_csv()  # get the overall and the killersports csvs
        ks_csv = self.get_ks_csv()

        target_csv = [WPGlobals.FINAL_KS_CSV_HEADER]  # initialize the target csv data matrix

        for idx in range(1, len(overall_csv)):  # for all rows in the overall csv
            target_csv.append(list(overall_csv[idx]))  # add a duplicate row to the target csv

        for row_idx in range(2, len(ks_csv), 3):  # for every game
            # retrieve the favorite, underdog and spacer rows from the killersports csv
            fav_ks_row = ks_csv[row_idx]
            dog_ks_row = ks_csv[row_idx + 1]
            ks_spacer_row = ks_csv[row_idx + 2]

            preprocessed_fav_ks_row = []  # initialize the new favorite and underdog rows
            preprocessed_dog_ks_row = []

            fav_spread_map = {}  # initialize the favorite and underdog spread maps
            dog_spread_map = {}

            self.display(row_idx)
            self.display(fav_ks_row)
            self.display(dog_ks_row)
            self.display(ks_spacer_row)

            for element_idx in range(3, len(fav_ks_row)):  # for every spread in the favorite row
                # retrieve the spread element from both the favorite and underdog rows
                fav_ks_element = fav_ks_row[element_idx]
                dog_ks_element = dog_ks_row[element_idx]
                # if the elements are not null strings
                if fav_ks_element != '' and dog_ks_element != '':
                    # separate the spread from the win-loss stats
                    fav_spread, fav_wlt = fav_ks_element.split(';')
                    dog_spread, dog_wlt = dog_ks_element.split(';')
                    # if the favorite and underdog spreads are equal in magnitude
                    if abs(float(fav_spread)) == abs(float(dog_spread)):
                        preprocessed_fav_ks_row.append(fav_ks_element)  # keep the spread element in both rows
                        preprocessed_dog_ks_row.append(dog_ks_element)

            # for every spread element in the preprocessed rows
            for element_idx in range(len(preprocessed_fav_ks_row)):
                # retrieve the spread element from both the favorite and underdog rows
                fav_ks_element = preprocessed_fav_ks_row[element_idx]
                dog_ks_element = preprocessed_dog_ks_row[element_idx]
                # separate the spread from the win-loss stats
                fav_spread, fav_wlt = fav_ks_element.split(';')
                dog_spread, dog_wlt = dog_ks_element.split(';')

                # parse out the win, loss and sample-size counts separately
                fav_wlt = [float(element) for element in fav_wlt.split('-')]
                dog_wlt = [float(element) for element in dog_wlt.split('-')]

                if fav_wlt[2] == 0:  # if the win-loss data is non-existent
                    fav_win_pct = 0.50  # assume equal chances of winning
                    dog_win_pct = 0.50
                else:  # otherwise compute the win percentages
                    fav_win_pct = fav_wlt[0] / fav_wlt[2]
                    dog_win_pct = dog_wlt[0] / dog_wlt[2]

                # populate the spread maps with (frequency, win percentage) pairs
                if float(fav_spread) not in fav_spread_map:
                    fav_spread_map[float(fav_spread)] = (1, fav_win_pct)  # add the pair
                else:
                    current_fav_freq = fav_spread_map[float(fav_spread)][0]  # get the current frequency
                    fav_spread_map[float(fav_spread)] = (current_fav_freq + 1, fav_win_pct)  # update the pair

                if float(dog_spread) not in dog_spread_map:
                    dog_spread_map[float(dog_spread)] = (1, dog_win_pct)
                else:
                    current_dog_freq = dog_spread_map[float(dog_spread)][0]
                    dog_spread_map[float(dog_spread)] = (current_dog_freq + 1, dog_win_pct)

            if len(fav_spread_map) != 0:  # if there are elements in the maps
                # compute the win percentages for the favorite and the underdog
                final_fav_win_pct = \
                    WinPercentageCalculator.compute_weighted_mean(fav_spread_map, weight_function, coefficients)
                final_dog_win_pct = \
                    WinPercentageCalculator.compute_weighted_mean(dog_spread_map, weight_function, coefficients)

                # append the two win percentages to the favorite and underdog rows
                target_csv[row_idx].append(str(final_fav_win_pct))
                target_csv[row_idx + 1].append(str(final_dog_win_pct))
            else:  # otherwise display the rows thus far
                self.display(row_idx)
                self.display(fav_ks_row)
                self.display(dog_ks_row)
                self.display(ks_spacer_row)
                self.display('\n')

        FileIO.write_csv(target_csv, target_csv_name)  # store the target csv to memory
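
    # illustrative sketch (assumption, not in the original listing): a
    # WinPercentageCalculator.compute_weighted_mean consistent with its use
    # above, where spread_map maps spread -> (frequency, win_pct) and
    # weight_function(spread, coefficients) scores how heavily a spread counts:
    #
    #     @staticmethod
    #     def compute_weighted_mean(spread_map, weight_function, coefficients):
    #         weighted_sum = weight_total = 0.0
    #         for spread, (freq, win_pct) in spread_map.items():
    #             weight = freq * weight_function(abs(spread), coefficients)
    #             weighted_sum += weight * win_pct
    #             weight_total += weight
    #         return weighted_sum / weight_total if weight_total else 0.5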
Example #39
0
File: A2.py Project: camvaz/ccsw
from FileIO import FileIO
from models.Set import Set

if __name__ == "__main__":
    # Declaring variables
    opc: str
    io: FileIO = FileIO("./data")
    conjunto: Set = Set()
    fileName: str

    # Welcome ASCII art
    print("""\
              _
             | |
             | |===( )   //////
             |_|   |||  | o o|
                    ||| ( c  )                  ____
                     ||| \= /                  ||   \_
                      ||||||                   ||     |
                      ||||||                ...||__/|-"
                      ||||||             __|________|__
                        |||             |______________|
                        |||             || ||      || ||
                        |||             || ||      || ||
------------------------|||-------------||-||------||-||-------
                        |__>            || ||      || ||
    """)

    print("PSP A2\n\n\n".center(53))
    fileName = input("Enter the file name: ")
    print("Read: r\nWrite: w\nTests: t")
Example #40
0
class ConversionUI(Frame):

    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.title('File Converter')
        self.pack()

        self.fileIO = FileIO()
        self.fromFile = None
        self.toFile = None
        self.AC = AvailableConversions()
        self.conversionTable = self.AC.getConversionTable()

        self.fromFileType = DOT_HEX
        self.toFileType = DOT_MIF

        self.menuBar()

        self.mainFrame()

    def mainFrame(self):
        convertFromToFrame = LabelFrame(self, text = 'Available conversion: from -> to')
        self.fromListbox = Listbox(convertFromToFrame)
        for i, key in enumerate(self.conversionTable):
            self.fromListbox.insert(i, key)
        self.fromListbox.pack(side='left')
        self.toListbox = Listbox(convertFromToFrame)
        self.toListbox.pack(side='left')
        convertFromToFrame.pack()

        self.convertButton = Button(self, text = 'Select Files and Convert', command = self.convertFile)
        self.convertButton.config(state=DISABLED)
        self.convertButton.pack(fill = 'both')


        logFrame = LabelFrame(self, text = 'Log')
        self.logText = StringVar()
        self.log = Message(logFrame, textvariable = self.logText, bg='white')
        self.log.pack(fill = 'both')
        logFrame.pack(fill='both')

        self.fromListbox.bind('<<ListboxSelect>>', self.updateToListbox)

    def convertFile(self):

        if self.fromFileType is None:
            self.logText.set('Please select a file to open.')
            self.fileIO.errorPopup('Please select a file to open.')
        elif self.toFileType is None:
            self.logText.set('Please select a file to be saved to.')
            self.fileIO.errorPopup('Please select a file to be saved to.')
        else:

            try:
                self.AC.convert(self.fromFileType, self.toFileType)
                if self.AC.wasSuccessful():
                    self.logText.set('Successfully converted {0} to {1}'.format(self.fromFileType, self.toFileType))
            except Exception:
                self.logText.set('Could not convert {0} to {1}'.format(self.fromFileType, self.toFileType))
                self.fileIO.errorPopup('Could not convert {0} to {1}'.format(self.fromFileType, self.toFileType))

    def updateToListbox(self, event):
        sel = self.fromListbox.curselection()
        self.fromFileType = self.fromListbox.get(sel)
        self.logText.set('{} selected to open'.format(self.fromFileType))

        if self.toListbox.size() != 0:
            self.toListbox.delete(0, self.toListbox.size())

        for i in range(len(self.conversionTable[self.fromFileType])):
            self.toListbox.insert(i, self.conversionTable[self.fromFileType][i])

        self.convertButton.config(state=DISABLED)
        self.toListbox.bind('<<ListboxSelect>>', self.setToFileType)

    def setToFileType(self, event):
        sel = self.toListbox.curselection()
        self.toFileType = self.toListbox.get(sel)
        self.logText.set('{} selected to save to'.format(self.toFileType))

        # Enable the convert button now that both file types are chosen
        self.convertButton.config(state=ACTIVE)

    def menuBar(self):
        self.menubar = Menu(self)

        filemenu = Menu(self.menubar, tearoff=0)
        filemenu.add_command(label="Exit", command=self.quit)
        self.menubar.add_cascade(label="File", menu=filemenu)

        toolsmenu = Menu(self.menubar, tearoff=0)
        toolsmenu.add_command(label="Radix Conversion", command=self.radixConversion)
        toolsmenu.add_separator()
        toolsmenu.add_command(label="ADC Code/Volt Convert", command=self.adcCodeVolt)
        self.menubar.add_cascade(label="Tools", menu=toolsmenu)

        helpmenu = Menu(self.menubar, tearoff=0)
        helpmenu.add_command(label="How To Use", command=self.howToUse)
        helpmenu.add_command(label="About", command=self.aboutProg)
        self.menubar.add_cascade(label="Help", menu=helpmenu)

        self.master.config(menu=self.menubar)

    def radixConversion(self):
        root = Toplevel()
        subapp = RadixConversionUI(master=root)
        subapp.mainloop()

    def adcCodeVolt(self):
        root = Toplevel()
        subapp = ADCCodeVoltUI(master=root)
        subapp.mainloop()

    def aboutProg(self):
        pass

    def howToUse(self):
        pass
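

# illustrative usage sketch (assumption, not in the original listing),
# following the Tkinter pattern used by the other UI classes in these examples:
if __name__ == '__main__':
    root = Tk()  # Tk comes from the same tkinter star-import this class relies on
    app = ConversionUI(master=root)
    app.mainloop()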
Example #41
0
def main():  # renamed from __main__; defining a function named __main__ never runs it
    libraryDict = LibraryDict()
    fileIOHandler = FileIO()

if __name__ == '__main__':
    main()
Example #42
0
#!/usr/bin/env python2.7
#encoding=utf-8

"""
"""
from xml.dom.minidom import parse,parseString
import codecs
from FileIO import FileIO

XML_FILE="../data/parser/all_clause_parse_utf8.dat"
FINDING_PATTERN = ["ADV@HED","SBV@ADV@HED","SBV@ADV@ADV@HED","ADV@ADV@HED","ADV@HED@MT","ATT@SBV@ADV@HED","SBV@ADV@HED@MT","ADV@HED@VOB","ADV@DE@ATT@HED","SBV@ADV@HED@VOB","CNJ@ADV@HED","ADV@DE@HED","SBV@ADV@DE@HED","DE@ATT@SBV@ADV@HED","ADV@HED@ATT@VOB","DE@ATT@ADV@HED"]
FINDING_RELATE = "ADV"
WORD_FILE = "data/pattern/ADV.dat"

if __name__ == "__main__":
    fio = FileIO()
    highfreq_clauses = fio.readFileToList("data/high_freq_clauses.dat")
    relate_clauselist = {}
    word_list = []  # words with the specific relation, recorded to a file for later use
    with codecs.open(XML_FILE,'r','utf-8') as f:
        #print "Read file and store relateString and clause into memory..."
        count = 0
        while True:
            line = f.readline()
            if not line:
                break
            if line.startswith('Input'):
                clause = line.strip('\n').split(': ',2)[1]
                f.readline()
                xml_string = ""
                xml_line = ""