Example #1
import tensorflow as tf


def deep_logit(input_dim, hidden_layer_dims, output_dim, learning_rate, reg_rate, batch_size, max_num_iters):

    x_train = tf.placeholder(tf.float32)
    y_labels = tf.placeholder(tf.float32)
    x_train_flattened = vectorize_input(x_train, batch_size)

    # params_conv = init_conv_params(filter_size=3, num_input_channels=1, num_filters=1, strides=[1, 1, 1, 1])
    # conv_op = conv_layer_forward(params_conv, x_train)

    # NOTE: x_train_flattened is passed through vectorize_input a second time;
    # while the conv layer above stays commented out this is effectively a pass-through.
    vectorised_conv_output = vectorize_input(x_train_flattened, batch_size)
    [logit, _, params] = deep_softmax_forward_pass(vectorised_conv_output, input_dim, hidden_layer_dims, output_dim)

    # use keyword arguments (positional labels/logits were deprecated) and
    # reduce to a scalar so the data term combines with the regularization term
    data_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_labels, logits=logit))
    regularization_loss = compute_regularization_loss(params, reg_rate)
    loss = tf.add(data_loss, regularization_loss)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=.9)

    train_op = optimizer.minimize(loss)

    db = DataParser()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()  # replaces the deprecated initialize_all_variables()
        sess.run(init_op)

        for _ in range(max_num_iters):

            # Get batch data:
            [batch_x, batch_labels] = db.get_next_batch(batch_size)

            # Single back propagation + update operation
            [_, curr_loss, _] = sess.run([train_op, loss, logit], {x_train: batch_x, y_labels: batch_labels})

    return curr_loss
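
# Usage sketch (assumption, not from the original source): the hyperparameter
# values are illustrative, and the helpers used above (vectorize_input,
# deep_softmax_forward_pass, compute_regularization_loss, DataParser)
# must already be defined/imported for this call to run.
final_loss = deep_logit(input_dim=784, hidden_layer_dims=[256, 128],
                        output_dim=10, learning_rate=1e-3, reg_rate=1e-4,
                        batch_size=64, max_num_iters=1000)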
Example #2
def main():
    searchType = input(
        "Welcome to OSHA Compliant HeatMiser! \nPlease select your option by pressing the appropriate number:"
        + " 1 for decision tree, 2 for k means clustering, or 3 to quit: ")
    if searchType == "1":
        print("Decision tree approach selected!")
        search = 0
        dt = DecisionTree()
        data = dp.getDataArrays()
        real_f_ones = ten_fold_cross_validation(data)
        fake_f_ones = generate_baseline(data)
        plotComparison(real_f_ones, fake_f_ones)
    elif searchType == "2":
        print(
            "K means clustering approach selected! Running model with optimal k = 2"
        )
        data = dp.getDataList()
        kCluster = KMeansClustering(2)
        kCluster.run(data)
        # Uncomment below to get either OSHA (1) or location (2) plot
        # getStatusPlot(data, 2)

        # Uncomment below to run elbow method
        # determineOptimalK(data)

    elif searchType == "3":
        print("Shutting down...")
    else:
        print("Sorry, that was an incorrect command. Shutting down...")
        sys.exit()
Example #3
def read_data(file_path):
    parser = DataParser(file_path, ',', True)
    features = []
    labels = []
    for row in parser.readrows():
        feature = list()
        feature.append(float(row['density']))
        feature.append(float(row['sugar_content']))
        features.append(feature)
        labels.append(int(row['label']))
    return features,labels
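
# Usage sketch (assumption): the file path mirrors the watermelon data set
# used in Examples #10 and #19 below.
features, labels = read_data('./watermelon_data_3a.txt')
print(len(features), 'samples loaded')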
Example #4
def checkQueue():
    try:
        data, source = mainQueue.get(timeout=0.05)
        # isinstance is the idiomatic way to test the message source type
        if isinstance(source, (RobotServer.RobotServer,
                               DatabaseConnection.DatabaseConnection)):
            checkCommand(DataParser.parseData(data))
        elif isinstance(source, RobotMovement.RobotMovement):
            serverThread.insertServerQueue(serverThread.sendData,
                                           DataParser.createServerPacket(data))
            if data["robotID"] is not None:
                dbThread.insertDBQueue(dbThread.sendData,
                                       DataParser.createDatabasePacket(data))
    except Queue.Empty:
        return
Example #5
def train_neural_model(model):
    DataParser.TOP = TOP
    x_train, x_test, y_train, y_test = DataParser.get_splitted_data_for_model(
        LOAD_DATA_FROM_FILE)

    print(x_train.shape, y_train.shape)

    # checkpoint_name = './modelDP/Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
    # checkpoint_name = './modelDP/Weights-best1.hdf5'

    checkpoint = keras.callbacks.ModelCheckpoint(MODEL_FILE_NAME,
                                                 monitor='val_loss',
                                                 verbose=1,
                                                 save_best_only=True,
                                                 mode='auto')

    earlyStopping = keras.callbacks.EarlyStopping(patience=16, mode="auto")
    tensorBoard = keras.callbacks.TensorBoard(log_dir="./logs")

    callbacks_list = [checkpoint, earlyStopping]

    # model = create_neural_model()
    model.fit(x_train,
              y_train,
              epochs=EPCHOES,
              batch_size=444,
              validation_split=0.2,
              callbacks=callbacks_list)
    print("Current one: ", model.evaluate(x_test, y_test, batch_size=44))

    model.load_weights(MODEL_FILE_NAME)
    print("Best one: ", model.evaluate(x_test, y_test, batch_size=44))
Example #6
    def __init__(self, isTrain, maxLength=0, n_mfcc=N_MFCC):
        if isTrain in [True, False]:
            self.N_MFCC = n_mfcc
            self.isTrain = isTrain
            self.maxLength = maxLength
            self.dataParser = DataParser.DataParser()
            self.vad = VAD.VAD()
            self.dataPathDictionary = self.__createDicitonaryForDataPath__()
            self.__create_random_noise_files__()
            self.__makeEverySpectogramSame__()
Example #8
    def run(self):

        while True:
            xml_text = self.in_queue.get()
            print "Received XML"
            if xml_text == "None":
                self.out_queue.put("None")
                break

            json_doc = DataParser.parse_get_state_stats_resp(xml_text)
            print "Made JSON"
            self.out_queue.put(json_doc)
Example #9
def InsertData_DB(Type, RawData, DEVICE_ID):

    # Pick the parser that matches the packet type; unknown types insert nothing.
    if Type == "wst":
        Records = DataParser.get_wifi(RawData.split('\n'))
    elif Type == "pst":
        Records = DataParser.get_phonestate(RawData.split('\n'))
    elif Type == "bst":
        Records = DataParser.get_battery(RawData.split('\n'))
    elif Type == "btst":
        Records = DataParser.get_bluetooth(RawData.split('\n'))
    elif Type == "gst":
        Records = DataParser.get_gsm(RawData.split('\n'))
    elif Type == "lst":
        Records = DataParser.get_location(RawData.split('\n'))
    else:
        return  # avoid a NameError on Records for unrecognised types

    for record in Records:

        if Type == "wst":
            DataObject = WiFi(DeviceID=DEVICE_ID, **record)
        elif Type == "pst":
            DataObject = Phone(DeviceID=DEVICE_ID, **record)
        elif Type == "bst":
            DataObject = Battery(DeviceID=DEVICE_ID, **record)
        elif Type == "btst":
            DataObject = Bluetooth(DeviceID=DEVICE_ID, **record)
        elif Type == "gst":
            DataObject = Cellular(DeviceID=DEVICE_ID, **record)
        else:  # "lst"
            DataObject = GPS(DeviceID=DEVICE_ID, **record)

        DataObject.save()
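
# Refactoring sketch (assumption, not the original author's code): the
# repeated Type checks above can be collapsed into one dispatch table
# mapping each packet type to its parser function and model class; the
# name InsertData_DB_v2 is hypothetical.
TYPE_DISPATCH = {
    "wst":  (DataParser.get_wifi,       WiFi),
    "pst":  (DataParser.get_phonestate, Phone),
    "bst":  (DataParser.get_battery,    Battery),
    "btst": (DataParser.get_bluetooth,  Bluetooth),
    "gst":  (DataParser.get_gsm,        Cellular),
    "lst":  (DataParser.get_location,   GPS),
}

def InsertData_DB_v2(Type, RawData, DEVICE_ID):
    parser, model = TYPE_DISPATCH[Type]
    for record in parser(RawData.split('\n')):
        model(DeviceID=DEVICE_ID, **record).save()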
Example #10
import matplotlib.pyplot as plt


def scatter_sample():
    parser = DataParser('./watermelon_data_3a.txt', ',', True)
    dataset = parser.load_data()

    X = dataset[:, 0:2]
    y = dataset[:, 2]
    f1 = plt.figure(1)
    plt.title('watermelon_3a')
    plt.xlabel('density')
    plt.ylabel('sugar_content')
    plt.scatter(X[y == 0, 0],
                X[y == 0, 1],
                marker='o',
                color='k',
                s=100,
                label='bad')
    plt.scatter(X[y == 1, 0],
                X[y == 1, 1],
                marker='o',
                color='g',
                s=100,
                label='good')
    plt.legend(loc='upper right')
    plt.show()
Example #11
    def run(self):
        text = self.text_tup[0]
        location = self.text_tup[1]
        writers_params = self.text_tup[2]
        writers = writers_params["writers"]
        locks = writers_params["locks"]
        match_rule = writers_params["match_rule"]
        metadata_table = writers_params["metadata_table"]

        print "parsing", location
        fluentd_accum = " "
        hbase_accum = " "

        fluentd_accum, hbase_accum = DataParser.parse_get_state_stats_resp(text) 
        DatastoreWriterWorker.DatastoreWriterWorker( ( {"fluentd":fluentd_accum,"hbase":hbase_accum}, location)  , writers, locks, match_rule, metadata_table).start()
        print "done parsing", location
Example #12
    def __init__(self):
        self.level_manager = LevelData.LevelProgressionManager()
        """
        tile_bindings is used to give our tiles a meaning.
        Each TileMap must be presented with a BindingType dictionary in order to understand what a number in a tilemap file means.

        For example, we could create a dictionary that includes {"0": ["Solid"]} and {"1": ["Solid", "Damage"]}.
        When our tilemap parses in a file that contains a 0, that tile will have a binding_type of "Solid"
        When our tilemap parses in a 1, that tile will have a binding_type of "Solid" and "Damage"

        It is up to you to give these tiles meaning in your game.
        """
        tile_bindings = {}
        tile_bindings["0"] = ["Passthrough"]
        tile_bindings["1"] = ["Solid"]
        tile_bindings["2"] = ["Hazard"]
        tile_bindings["3"] = ["Warp"]
        tile_bindings["r"] = ["Reload"]

        #Load all of our levels. MapCount is the number of levels we need to load. (Levels start at index 0.)
        mapCount = 8
        for i in range(mapCount):
            data_path = "..//Maps//Level" + str(i) + "//Data.txt"
            dta = DataParser.parse(data_path)
            prefix = "..//Maps//Level" + str(i) + "//Lv" + str(i)
            tile_path = prefix + "_TileMap.txt"
            collision_path = prefix + "_CollisionMap.txt"
            object_path = prefix + "_ObjectMap.txt"
            level = "Level " + str(i)
            self.level_manager.levels.append(level)
            tile_map = r.SpriteSheet()
            tile_map.load_texture("..//Textures//" + dta[0],
                                  cell_size=r.Vector2(16, 16))
            _map = r.TileMap(tile_map, tile_bindings, tile_path,
                             collision_path, object_path, LevelData.ObjAry,
                             level)
            r.TileMapManager.add_map(_map)

        #Change this variable to the level you want to test.
        level_test = 0
        self.level_manager.current_level = (level_test - 1)
        self.level_manager.load_next_level()

        #Prepare our hero tile for use.
        self.hero = LevelData.load_character(r.TileMapManager.active_map, self)
        self.hero.set_default_pos(r.TileMapManager.active_map.start_location)
        r.Ragnarok.get_world().add_obj(self.hero)
Example #13
def main(_):
    x_train, x_test, y_train, y_test = DataParser.get_data_for_model(
        LOAD_DATA_FROM_FILE)
    c_t = []
    c_test = []

    xs = tf.placeholder('float')
    ys = tf.placeholder('float')

    output = neural_net_model(xs, NUM_OF_COLS)

    cost = tf.reduce_mean(tf.square(output - ys))

    train = tf.train.GradientDescentOptimizer(0.001).minimize(cost)

    with tf.Session() as sess:
        with tf.device("/cpu:0"):
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            # saver.restore(sess, MODEL_FILE_NAME)

            for epoch in range(EPCHOES):
                for j in range(x_train.shape[0]):
                    sess.run([cost, train],
                             feed_dict={
                                 xs: x_train[j, :].reshape(1, NUM_OF_COLS),
                                 ys: y_train[j]
                             })

                c_t.append(sess.run(cost, feed_dict={
                    xs: x_train,
                    ys: y_train
                }))
                c_test.append(
                    sess.run(cost, feed_dict={
                        xs: x_test,
                        ys: y_test
                    }))

                print("Epoch ", epoch, " cost: ", c_test[epoch])

        # pred = sess.run(output, feed_dict={xs:x_test})
        saver.save(sess, MODEL_FILE_NAME + "test.ckpt")
        print("model saved")

        _start_shell(locals())
Example #14
def test_model(test_x, test_y, model_dict, checkpoint_path):
    '''
    Test the model.
    :param test_x: test data
    :param test_y: test labels
    :param model_dict: model dictionary that stores the graph
    :param checkpoint_path: the checkpoint of the model
    :return: predicted results for the input data, with shape (length, 1, 1)
    '''
    TRAIN_POINTS = test_x.shape[0]
    input_test_x = test_x[0, :, :][np.newaxis, :, :]
    test_pred = np.zeros(shape=(TRAIN_POINTS, 1, 1))
    loss = 0
    with tf.Session(graph=model_dict['graph']) as sess:
        # load the model
        print('[INFO] Loading model...')
        checkpoint = tf.train.latest_checkpoint(checkpoint_path)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint)
        print(
            "[INFO] model restore from the checkpoint {0}".format(checkpoint))

        print('[INFO] Testing...')
        for i in range(TRAIN_POINTS):
            # input_train_x = train_x[i,:,:][np.newaxis,:,:]
            input_test_y = test_y[i, :, :][np.newaxis, :, :]
            feed_dict = {
                model_dict['input_x']: input_test_x,
                model_dict['input_y']: input_test_y,
                model_dict['batch_size_ph']: 1
            }
            lossV, pred_tmp = sess.run(
                [model_dict['loss'], model_dict['output']],
                feed_dict=feed_dict)
            loss += lossV
            input_test_x = dp.get_test_data(input_test_x, pred_tmp[:, -1, :])
            test_pred[i, :, :] = pred_tmp[:, -1, :]
        print('[INFO] Testing over...')
        print('Testing loss: %f' % (loss / TRAIN_POINTS))

        return test_pred
Example #15
def initData():
	'''initData
		Initializes the data from the generated QT data set

		args:
			None

		return:
			[gdict, fig, ax, limits]  the variable pack; saved because gdict can be very large.
	'''

	plt.clf()

	#ion is interactive mode on. This makes it so the window doesn't close after processing
	plt.ion()

	#Get data from the quadtree file
	gdict,limits,rectVects = DataParser.parseQT()

	fig, ax = Plotting.plotBox(gdict, limits, rectVects)

	####this is pack
	return [gdict, fig, ax, limits]
Example #16
import DataParser
import ClassFilter
import TimetableMaker

print("started")

# Mock user data
semester = 1
classesId = [178880, 178754]

# Parse csv into array of stripped data
dataRawArray = DataParser.csvIntoRawArray(DataParser.openFile("HorariosRaw.csv"))
dataStrippedArray = DataParser.rawArrayToStrippedArray(dataRawArray)

# Filter and separate classes
aulas = {}
for disciplina in classesId:
    possibleClasses = ClassFilter.possibleClasses(dataStrippedArray, semester, disciplina) # All classes for the semester
    noDuplicates = ClassFilter.removeDuplicates(possibleClasses, True)
    aulas[disciplina] = ClassFilter.groupByType(noDuplicates)

# Make timetable
possibleCombinations = TimetableMaker.possibleCombinations(aulas)
noOverlaps = TimetableMaker.removeOverlaps(aulas, possibleCombinations)

# Output to Excel
wb = TimetableMaker.outputExcel(aulas, noOverlaps)

# Output to html
TimetableMaker.convertExcelToWeb(wb)
Example #17
            scores.append({'mean': mean, 'std': std, 'params': param})

        newlist = sorted(scores, key=lambda k: k['mean'], reverse=True)

        melhoresParams = newlist[:5]
        melhoresScore = scores.copy()

        classificadores[nome][2] = melhoresParams.copy()
        classificadores[nome][3] = melhoresScore.copy()


if __name__ == "__main__":
    inputDir = "./DataSets/Raw/"
    inputFiles = FileUtils.abrirDataSets(inputDir)
    inputFiles = DataParser.parseFiles(inputFiles)

    outputDir = "./DadosColetados/"
    outputFiles = dict()
    for file in inputFiles:
        outputFiles[file] = outputDir + file + ".csv"

    outputData = dict()
    for file in inputFiles:
        outputData[file] = dict()

        if file == "Zoo": n_splits = 3
        elif file == "Poker": n_splits = 5
        elif file == "Flags": n_splits = 4
        else: n_splits = 10
Example #18
        matched_r = db_match_suite(street_parameters, r)
        if matched_r:
            result = _map_to_dict(matched_r)
            print "Suite match: {}-{} {}, suite {}".format(
                result["start_address"], result["end_address"], result["street_name"], result["db_suite"]
            )
            return result
        else:
            print "Multiple results returned for address, no matching suite found."
    else:
        pass
    return None


### MAIN
row_dicts = DP.get_csv_dict(INPUT_ADDRESSES, YEAR)
print "Rows for %s:" % (YEAR,), len(row_dicts)
index = 0
matches = 0
output_rows = []
missed_addresses = []
row_dicts = [row for row in row_dicts if row["prix_vendu"] != ""]  # remove rows with blank sales price
for row in row_dicts:
    street_parameters = DP.get_street_parameters(row)
    # skip inputs without a designated street address
    if not street_parameters["street_number_lower"] and not street_parameters["street_number_upper"]:
        continue
    # if only the upper bound is present, fall back to it as the lower bound
    if street_parameters["street_number_upper"] and not street_parameters["street_number_lower"]:
        street_parameters["street_number_lower"] = street_parameters["street_number_upper"]
Example #19
    def test_load_data(self):
        parser = DataParser('./watermelon_data_3a.txt', ',', True)
        dataset = parser.load_data()
        row = dataset[0]
        # use abs() so the tolerance check also rejects values far below 0.697
        assert abs(row[0] - 0.697) < 0.1
        assert row[2] == 1
Example #20
class RateMonitor:
    def __init__(self):
        # type: () -> None
        self.plotter = PlotMaker()
        self.fitter = FitFinder()
        self.data_parser = DataParser()

        self.use_fills = False
        self.make_fits = False
        self.use_fit_file = False  # Currently unused outside of setupCheck
        self.update_online_fits = False
        self.plotter.compare_fits = False  # Compare fits to multiple groups of runs

        self.use_pileup = True  # plot <PU> vs. rate
        self.use_lumi = False  # plot iLumi vs. rate
        self.use_LS = False  # plot LS vs. rate

        self.use_stream_bandwidth = False
        self.use_stream_size = False

        self.use_grouping = False  # Creates sub directories to group the outputs, utilizes self.group_map

        # TESTING: START #

        self.certify_mode = False

        # TESTING: END #

        self.group_map = {}  # {'group_name': [trigger_name] }

        self.fill_list = []  # Fills to get data from, Currently Unused
        self.run_list = []  # Runs to get data from
        self.object_list = [
        ]  # List of *ALL* objects to plot, except for when using grouping

        self.ops = None  # The options specified at the command line

        self.rate_mon_dir = os.getcwd()
        self.save_dir = os.path.join(os.getcwd(), "tmp_rate_plots")
        self.certify_dir = os.path.join(os.getcwd(), "tmp_certify_dir")
        self.online_fits_dir = os.path.join(os.getcwd(), "Fits")
        self.fit_file = None  # Currently Unused

    # The main function of RateMonitor, handles all the stitching together of the other pieces of code
    # NOTE1: This might be doing too many things --> break up into pieces?
    def run(self):
        # type: () -> None

        if not self.setupCheck():
            print "ERROR: Bad setup"
            return

        print "Using runs:", self.run_list
        print "Using Prescaled rates:", self.data_parser.use_prescaled_rate

        if self.update_online_fits:
            # Ensures that DataParser gets all triggers
            self.data_parser.hlt_triggers = []
            self.data_parser.l1_triggers = []

        ### THIS IS WHERE WE GET ALL OF THE DATA ###
        self.data_parser.parseRuns(self.run_list)

        if self.data_parser.use_streams:
            # We want to manually add the streams to the list of objects to plot
            self.object_list += self.data_parser.getObjectList(
                obj_type="stream")
            if self.use_grouping:
                self.group_map["Streams"] = self.data_parser.getObjectList(
                    obj_type="stream")

        if self.data_parser.use_datasets:
            # Same concept, but for datasets
            self.object_list += self.data_parser.getObjectList(
                obj_type="dataset")
            if self.use_grouping:
                self.group_map["Datasets"] = self.data_parser.getObjectList(
                    obj_type="dataset")

        if self.data_parser.use_L1A_rate:
            # Manually add L1A rates to the list of objects to plot
            self.object_list += self.data_parser.getObjectList(obj_type="L1A")
            if self.use_grouping:
                self.group_map["L1A_Rates"] = self.data_parser.getObjectList(
                    obj_type="L1A")

        bunch_map = self.data_parser.getBunchMap()
        det_status = self.data_parser.getDetectorStatus()
        phys_status = self.data_parser.getPhysStatus()

        # Select the types of data we are going to plot
        if self.use_pileup:  # plot PU vs. rate
            x_vals = self.data_parser.getPUData()
        elif self.use_lumi:  # plot iLumi vs. rate
            x_vals = self.data_parser.getLumiData()
        elif self.use_LS:  # plot LS vs. rate
            x_vals = self.data_parser.getLSData()

        if self.use_stream_size:
            y_vals = self.data_parser.getSizeData()
        elif self.use_stream_bandwidth:
            y_vals = self.data_parser.getBandwidthData()
        else:
            y_vals = self.data_parser.getRateData()

        # Now we fill plot_data with *ALL* the objects we have data for

        #plot_data = {}     # {'object_name': { run_number:  ( [x_vals], [y_vals], [det_status] , [phys_status] ) } }
        #for name in self.data_parser.getNameList():
        #    if not plot_data.has_key(name):
        #        plot_data[name] = {}
        #    for run in sorted(self.data_parser.getRunsUsed()):
        #        if not x_vals[name].has_key(run):
        #            continue
        #        plot_data[name][run] = [x_vals[name][run],y_vals[name][run],det_status[name][run],phys_status[name][run]]

        plot_data = self.getData(x_vals, y_vals, det_status, phys_status,
                                 self.fitter.data_dict['user_input'])

        # If no objects are specified, plot everything!
        if len(self.object_list) == 0:
            self.object_list = [x for x in self.data_parser.name_list]

        self.setupDirectory()

        ### NOW SETUP PLOTMAKER ###
        self.plotter.setPlottingData(plot_data)
        self.plotter.bunch_map = bunch_map

        normalization = 1
        if self.data_parser.normalize_bunches:
            max_key = max(bunch_map.iterkeys(),
                          key=(lambda key: bunch_map[key]))
            normalization = bunch_map[max_key]
        print "Fit Normalization: %d" % normalization

        # Make a fit of each object to be plotted, and save it to a .pkl file
        if self.make_fits:

            fit_info = {
                'run_groups': copy.deepcopy(self.fitter.data_dict),
                'triggers': {}
            }
            for group, runs in self.fitter.data_dict.iteritems():
                data = self.getData(x_vals, y_vals, det_status, phys_status,
                                    runs)
                data_fits = self.fitter.makeFits(data, self.object_list,
                                                 normalization, group)
                fit_info['triggers'] = self.fitter.mergeFits(
                    fit_info['triggers'], data_fits)
                print group, runs

            self.plotter.setFits(fit_info)
            self.fitter.saveFits(self.plotter.fit_info, "fit_file.pkl",
                                 self.save_dir)
            #self.fitter.saveFits(fit_info,"fit_file.pkl",self.save_dir)
            #self.plotter.setFits(fit_info)

        elif self.update_online_fits:
            self.updateOnlineFits(plot_data, normalization)
            return  # This keeps us from having to worry about any additional output plots
        elif self.certify_mode:
            self.certifyRuns(plot_data)
            return  # Same as above

        # We want fits and no fits were specified --> make some
        # NOTE: This 'if' is true only when ZERO fits exist
        if self.plotter.use_fit and len(self.plotter.fits.keys()) == 0:
            #fits = self.fitter.makeFits(plot_data,plot_data.keys(),normalization)
            #self.plotter.setFits(fits)
            fit_info = {
                'run_groups':
                copy.deepcopy(self.fitter.data_dict),
                'triggers':
                self.fitter.makeFits(plot_data, plot_data.keys(),
                                     normalization)
            }
            self.plotter.setFits(fit_info)

        # This is after update_online_fits, so as to ensure the proper save dir is set
        self.plotter.save_dir = self.save_dir
        self.plotter.plot_dir = "png"

        counter = 0
        # Specifies how we want to organize the plots in the output directory
        if self.use_grouping:
            print "Making Plots..."

            # Create plots for *EVERYTHING* we've queried for
            objs_to_plot = set()
            for obj in self.object_list:
                objs_to_plot.add(obj)
            plotted_objects = self.makePlots(list(objs_to_plot))
            counter += len(plotted_objects)

            # Create index.html files to display specific groups of plots
            for grp in self.group_map:
                grp_path = os.path.join(self.save_dir, grp)
                grp_objs = set()
                for obj in plotted_objects:
                    if obj in self.group_map[grp]:
                        grp_objs.add(obj)
                self.printHtml(png_list=grp_objs,
                               save_dir=self.save_dir,
                               index_dir=grp_path,
                               png_dir="..")

            #for grp in self.group_map:
            #    print "Plotting group: %s..." % grp
            #    grp_path = os.path.join(self.save_dir,grp)
            #    self.plotter.save_dir = grp_path
            #    objs_to_plot = set()
            #    for obj in self.object_list:
            #        # Get the the plot objs associated with this group
            #        if obj in self.group_map[grp]:
            #            objs_to_plot.add(obj)
            #    plotted_objects = self.makePlots(list(objs_to_plot))
            #    self.printHtml(plotted_objects,grp_path)
            #    counter += len(plotted_objects)
        else:
            plotted_objects = self.makePlots(self.object_list)
            #self.printHtml(plotted_objects,self.plotter.save_dir)
            self.printHtml(png_list=plotted_objects,
                           save_dir=self.save_dir,
                           index_dir=self.save_dir,
                           png_dir=".")
            counter += len(plotted_objects)
        print "Total plot count: %d" % counter

    # Makes some basic checks to ensure that the specified options don't conflict with each other
    def setupCheck(self):
        # type: () -> bool

        ## These two options are mutually exclusive
        #if not (self.use_pileup ^ self.use_lumi): # ^ == XOR
        #    print "ERROR SETUP: Improper selection for self.use_pileup and self.use_lumi"
        #    return False

        # We can't specify two different x_axis at the same time
        if self.use_pileup and self.use_lumi:
            print "ERROR SETUP: Improper selection for self.use_pileup and self.use_lumi"
            return False

        # If we specify a fit file, we don't want to make new fits
        if self.use_fit_file and self.update_online_fits:
            print "ERROR SETUP: Fit file specified while trying updating online fits"
            return False
        elif self.use_fit_file and self.make_fits:
            print "ERROR SETUP: Fit file specified while trying to make fits"
            return False

        # Check to see if the online fits directory exists
        if self.update_online_fits and not os.path.exists(
                self.online_fits_dir):
            print "ERROR SETUP: Could not find fit directory"
            return False

        # We need to make sure we have the map between the plot objects and directories
        if self.use_grouping and len(self.group_map.keys()) == 0:
            print "ERROR SETUP: Grouping selected, but no group map found"
            return False

        # We don't want to accidentally remove all the contents from the current working directory
        if self.save_dir == self.rate_mon_dir:
            print "ERROR SETUP: Save directory is the same as RateMon directory"
            return False

        # Certify mode doesn't create any fits, so we shouldn't be updating any existing fits
        if self.update_online_fits and self.certify_mode:
            print "ERROR SETUP: Can't update online fits and use certify mode at the same time"
            return False

        # In certify_mode we need to specify pre-made fits to use
        if self.certify_mode and len(self.plotter.fits.keys()) == 0:
            print "ERROR SETUP: No fits were found while in certify mode"
            return False

        ## We are configured to only create/display the default fit, so only generate one fit
        #if self.make_fits and not self.fitter.use_best_fit and not self.plotter.use_multi_fit:
        #    print "WARNING: Only creating the default fit, %s" % self.plotter.default_fit
        #    self.fitter.fits_to_try = [self.plotter.default_fit]

        return True

    # Sets up the save directories based on the CLI options
    def setupDirectory(self):
        # type: () -> None
        print "Setting up directories..."

        if self.update_online_fits:
            mon_trg_dir = os.path.join(
                self.online_fits_dir,
                "Monitor_Triggers")  # $rate_mon_dir/Fits/Monitor_Triggers
            all_trg_dir = os.path.join(
                self.online_fits_dir,
                "All_Triggers")  # $rate_mon_dir/Fits/All_Triggers
            if os.path.exists(mon_trg_dir):
                shutil.rmtree(mon_trg_dir)
                print "\tRemoving existing directory: %s " % (mon_trg_dir)
            if os.path.exists(all_trg_dir):
                shutil.rmtree(all_trg_dir)
                print "\tRemoving existing directory: %s " % (all_trg_dir)
            print "\tCreating directory: %s " % (mon_trg_dir)
            os.mkdir(mon_trg_dir)
            os.chdir(mon_trg_dir)
            os.mkdir("plots")
            os.chdir(self.rate_mon_dir)
            print "\tCreating directory: %s " % (all_trg_dir)
            os.mkdir(all_trg_dir)
            os.chdir(all_trg_dir)
            os.mkdir("plots")
            os.chdir(self.rate_mon_dir)
            return
        elif self.certify_mode:
            # Ex: Certification_1runs_2016-11-02_13_27
            dir_str = "Certification_%druns_%s" % (
                len(self.run_list),
                datetime.datetime.now().strftime("%Y-%m-%d_%H_%M"))
            #dir_str = "Certification_%druns" % (len(self.run_list))
            self.certify_dir = os.path.join(self.rate_mon_dir, dir_str)
            if os.path.exists(self.certify_dir):
                shutil.rmtree(self.certify_dir)
                print "\tRemoving existing directory: %s " % (self.certify_dir)
            print "\tCreating directory: %s " % (self.certify_dir)
            os.mkdir(self.certify_dir)
            os.chdir(self.certify_dir)
            for run in self.run_list:
                run_str = "run%d" % run
                run_dir = os.path.join(self.certify_dir, run_str)
                print "\tCreating directory: %s " % (run_dir)
                os.mkdir(run_dir)
                os.chdir(run_dir)
                os.mkdir("png")
                os.chdir(self.certify_dir)
            return
        else:
            if os.path.exists(self.save_dir):
                shutil.rmtree(self.save_dir)
                print "\tRemoving existing directory: %s " % (self.save_dir)
            os.mkdir(self.save_dir)
            os.chdir(self.save_dir)
            print "\tCreating directory: %s " % (self.save_dir)
            os.mkdir("png")
            if self.use_grouping:
                for grp_dir in self.group_map.keys():
                    os.mkdir(grp_dir)
                    print "\tCreating directory: %s " % (os.path.join(
                        self.save_dir, grp_dir))
            os.chdir("../")
            return

    # Stitching function that interfaces with the plotter object
    def makePlots(self, plot_list):
        # type: (List[str]) -> List[str]
        if not self.use_grouping:
            print "Making plots..."

        plotted_objects = []
        counter = 1
        prog_counter = 0
        for _object in sorted(plot_list):
            if prog_counter % max(1, math.floor(len(plot_list) / 10.)) == 0:
                print "\tProgress: %.0f%% (%d/%d)" % (
                    100. * prog_counter / len(plot_list), prog_counter,
                    len(plot_list))
            prog_counter += 1
            if not self.plotter.plotting_data.has_key(_object):
                # No valid data points could be found for _object in any of the runs
                print "\tWARNING: Unknown object - %s" % _object
                continue
            self.formatLabels(_object)

            if self.plotter.plotAllData(_object):
                plotted_objects.append(_object)
                counter += 1
        return plotted_objects

    # Formats the plot labels based on the type of object being plotted
    # TODO: Might want to move this (along with makePlots() into PlotMaker.py),
    #       would require specifying a type_map within the plotter object
    def formatLabels(self, _object):
        # type: (str) -> None

        x_axis_label = ""
        y_axis_label = ""

        x_axis_var = ""
        y_axis_var = ""

        x_units = ""
        y_units = "[Hz]"

        if self.use_pileup:  # plot PU vs. rate
            x_axis_var = "< PU >"
        elif self.use_lumi:  # plot iLumi vs. rate
            x_axis_var = "instLumi"
        else:  # plot LS vs. rate
            x_axis_var = "lumisection"

        if self.data_parser.type_map[_object] == "trigger":
            if self.data_parser.correct_for_DT:
                y_axis_var = "pre-deadtime "

            if self.data_parser.use_prescaled_rate:
                y_axis_var += "prescaled rate"
            else:
                y_axis_var += "unprescaled rate"
        elif self.data_parser.type_map[_object] == "stream":
            if self.use_stream_size or self.use_stream_bandwidth:
                y_units = "[bytes]"
            y_axis_var = "stream rate"
        elif self.data_parser.type_map[_object] == "dataset":
            y_axis_var = "dataset rate"
        elif self.data_parser.type_map[_object] == "L1A":
            y_axis_var = "L1A rate"

        x_axis_label += x_axis_var
        y_axis_label += y_axis_var

        # Format the y_axis_label denominator
        if self.data_parser.normalize_bunches and self.data_parser.use_cross_section:
            y_axis_label += " / (num bx*iLumi)"
        elif self.data_parser.normalize_bunches:
            y_axis_label += " / num colliding bx"
        elif self.data_parser.use_cross_section:
            y_axis_label += " / iLumi"

        y_axis_label += " " + y_units

        self.plotter.var_X = x_axis_var
        self.plotter.var_Y = y_axis_var
        self.plotter.label_X = x_axis_label
        self.plotter.label_Y = y_axis_label
        self.plotter.units_X = x_units
        self.plotter.units_Y = y_units

    def updateOnlineFits(self, plot_data, normalization):
        # type: (Dict[str,Dict[int,object]], int) -> None

        # NOTE: self.object_list, contains *ONLY* the list of triggers from 'monitorlist_COLLISIONS.list'
        mon_trg_dir = os.path.join(self.online_fits_dir, "Monitor_Triggers")
        all_trg_dir = os.path.join(self.online_fits_dir, "All_Triggers")

        all_triggers = set()
        for obj in self.data_parser.getNameList():
            all_triggers.add(obj)
        all_triggers = list(all_triggers)

        self.plotter.plot_dir = "plots"

        # Plots the monitored paths
        print "Updating monitored trigger fits..."
        print "Total Triggers: %d" % (len(self.object_list))
        self.plotter.save_dir = mon_trg_dir
        #fits = self.fitter.makeFits(plot_data,self.object_list,normalization)
        #self.plotter.setFits(fits)
        #self.fitter.saveFits(fits,"FOG.pkl",mon_trg_dir)
        #fit_info = self.fitter.makeFits(plot_data,self.object_list,normalization)
        fit_info = {
            'run_groups':
            copy.deepcopy(self.fitter.data_dict),
            'triggers':
            self.fitter.makeFits(plot_data, plot_data.keys(), normalization)
        }
        self.plotter.setFits(fit_info)
        self.fitter.saveFits(self.plotter.fit_info, "FOG.pkl", mon_trg_dir)
        plotted_objects = self.makePlots(self.object_list)

        # Plots all trigger paths
        print "Updating all trigger fits..."
        print "Total Triggers: %d" % (len(all_triggers))
        self.plotter.save_dir = all_trg_dir
        #fits = self.fitter.makeFits(plot_data,all_triggers,normalization)
        #self.plotter.setFits(fits)
        #self.fitter.saveFits(fits,"FOG.pkl",all_trg_dir)
        #fit_info = self.fitter.makeFits(plot_data,all_triggers,normalization)
        fit_info = {
            'run_groups':
            copy.deepcopy(self.fitter.data_dict),
            'triggers':
            self.fitter.makeFits(plot_data, plot_data.keys(), normalization)
        }
        self.plotter.setFits(fit_info)
        self.fitter.saveFits(self.plotter.fit_info, "FOG.pkl", all_trg_dir)
        plotted_objects = self.makePlots(all_triggers)

        command_line_str = "Results produced with:\n"
        command_line_str += "python plotTriggerRates.py "
        for tup in self.ops:
            #if tup[0].find('--updateOnlineFits') > -1:
            #    # never record when we update online fits
            #    continue
            #elif tup[0].find('--lsVeto') > -1:
            #    continue
            if len(tup[1]) == 0:
                command_line_str += "%s " % (tup[0])
            else:
                command_line_str += "%s=%s " % (tup[0], tup[1])
        for run in self.run_list:
            command_line_str += "%d " % (run)
        command_line_str += "\n"

        command_line_file_name = os.path.join(mon_trg_dir, "command_line.txt")
        log_file_mon = open(command_line_file_name, "w")
        log_file_mon.write(command_line_str)
        log_file_mon.close()

        command_line_file_name = os.path.join(all_trg_dir, "command_line.txt")
        log_file_all = open(command_line_file_name, "w")
        log_file_all.write(command_line_str)
        log_file_all.close()

    def certifyRuns(self, plot_data):
        # type: (Dict[str,Dict[int,object]]) -> None

        #self.plotter.save_dir = self.certify_dir
        #self.plotter.root_file_name = "CertificationSumaries.root"

        # {'name': {run: [ (LS,pred,err) ] } }

        lumi_info = self.data_parser.getLumiInfo()
        sorted_run_list = sorted(self.run_list)

        log_file_name = "CertificationSummary_run" + str(
            sorted_run_list[0]) + "_run" + str(sorted_run_list[-1]) + ".txt"
        log_file = open(self.certify_dir + "/" + log_file_name, 'w')

        for run in self.run_list:
            log_file.write("Run Number: %s\n" % (run))

            self.plotter.save_dir = self.certify_dir
            self.plotter.root_file_name = "CertificationSummaries.root"

            #pred_data = self.getPredictionData(run)     # {'trg name': { 'group name': [ (LS,pred,err) ] } }
            pred_data = self.getPredictionData(
                run
            )  # {'trg name': { 'group name': { 'fit_type': [ (LS,pred,err) ] } } }

            ## Check if there are multiple fit types to plot
            #multi_fit_types = False
            #for trg in pred_data:
            #    for grp in pred_data[trg]:
            #        if len(pred_data[trg][grp].keys()) > 1:
            #            multi_fit_types = True

            for group in self.plotter.run_groups:
                ## We have multiple fit types per trg: separate histograms and summary text file by fit type
                #if self.plotter.use_multi_fit and multi_fit_types:
                #    for fit_type in self.fitter.fits_to_try:
                #        log_file.write("\n")
                #        log_file.write("Group: %s\n" % (group))
                #        log_file.write("Fit type: %s\n" % (fit_type))
                #        self.plotter.makeCertifySummary(run,pred_data,log_file,group,multi_fit_types,fit_type)
                # We have only one fit type per trg: do not separate histograms and summary text file by fit type
                log_file.write("\n")
                log_file.write("Group: %s\n" % (group))
                self.plotter.makeCertifySummary(run, pred_data, log_file,
                                                group)

            print "Making certification plots for run %d..." % run
            run_dir = os.path.join(self.certify_dir, "run%d" % run)
            self.plotter.save_dir = run_dir
            self.plotter.plot_dir = "png"
            self.plotter.root_file_name = "HLT_LS_vs_rawRate_Fitted_Run%d_CERTIFICATION.root" % run
            plotted_objects = []
            for obj in self.object_list:
                if obj not in self.data_parser.name_list:
                    print "Skipping missing trigger: %s" % (obj)
                    continue
                self.formatLabels(obj)
                if self.plotter.makeCertifyPlot(obj, run, lumi_info[run]):
                    print "Plotting %s..." % obj
                    plotted_objects.append(obj)
            self.printHtml(png_list=plotted_objects,
                           save_dir=run_dir,
                           index_dir=self.save_dir,
                           png_dir=".")

    # We create a prediction dictionary on a per run basis, which covers all triggers in that run
    # TODO: Should move this to DataParser.py
    def getPredictionData(self, run):
        # UNFINISHED
        # We need to disable converting the output
        prev_state = self.data_parser.convert_output
        self.data_parser.convert_output = False

        #lumi_info = self.data_parser.parser.getLumiInfo(runNumber=run)  # {run_number: [ (LS,ilum,psi,phys,cms_ready) ] }
        lumi_info = self.data_parser.getLumiInfo(
        )  # {run_number: [ (LS,ilum,psi,phys,cms_ready) ] }
        ls_data = self.data_parser.getLSData(
        )  # {'name': { run_number: [ LS ] } }
        pu_data = self.data_parser.getPUData(
        )  # {'name': { run_number: { LS: PU } } }
        bunch_map = self.data_parser.getBunchMap()  # {run_number: bunches}

        plotter_sigmas = self.plotter.sigmas

        #pred_dict = {}  # {'name': [ (LS,pred,err) ] }
        #pred_dict = {}  # {'trg name': {'group name': [ (LS,pred,err) ] } }
        pred_dict = {
        }  # {'trg name': {'group name': { 'fit_type': [ (LS,pred,err) ] } } }

        for obj in self.plotter.fits:
            if not pu_data.has_key(obj):
                continue
            elif not pu_data[obj].has_key(run):
                continue

            pred_dict[obj] = {}

            for group in self.plotter.fits[obj]:

                pred_dict[obj][group] = {}

                # Find the best fit
                best_fit_type, best_fit = self.fitter.getBestFit(
                    self.plotter.fits[obj][group])

                lsVals = []
                puVals = []
                for LS, ilum, psi, phys, cms_ready in lumi_info[run]:
                    if ilum is not None and phys:
                        if not pu_data[obj][run].has_key(LS):
                            continue
                        lsVals.append(LS)
                        puVals.append(pu_data[obj][run][LS])
                lumisecs, predictions, ls_error, pred_error = self.fitter.getPredictionPoints(
                    best_fit, lsVals, puVals, bunch_map[run], 0)
                pred_dict[obj][group][best_fit_type] = zip(
                    lumisecs, predictions, pred_error)

                ##############################################################################################

        # --- 13 TeV constant values ---
        #ppInelXsec = 80000.
        #orbitsPerSec = 11246.

        ## Initialize our point arrays
        #lumisecs    = array.array('f')
        #predictions = array.array('f')
        #ls_error    = array.array('f')
        #pred_error  = array.array('f')

        ## Unpack values
        #fit_type, X0, X1, X2, X3, sigma, meanraw, X0err, X1err, X2err, X3err, ChiSqr = best_fit

        ## Create our point arrays
        #for LS, ilum, psi, phys, cms_ready in lumi_info[run]:
        #    if not ilum is None and phys:
        #        if not pu_data[obj][run].has_key(LS):
        #            continue
        #        lumisecs.append(LS)
        #        #pu = (ilum * ppInelXsec) / ( self.bunch_map[run] * orbitsPerSec )
        #        pu = pu_data[obj][run][LS]
        #        # Either we have an exponential fit, or a polynomial fit
        #        if fit_type == "exp":
        #            rr = bunch_map[run] * (X0 + X1*math.exp(X2+X3*pu))
        #        elif fit_type == "sinh":
        #            #val = 0
        #            #val += math.pow(X0*pu,11)/39916800.
        #            #val += math.pow(X0*pu,9)/362880.
        #            #val += math.pow(X0*pu,7)/5040.
        #            #val += math.pow(X0*pu,5)/120.
        #            #val += math.pow(X0*pu,3)/6.
        #            #val += math.pow(X0*pu,1)
        #            #val = X1*val + X2
        #            #rr = bunch_map[run] * (val)
        #            rr = bunch_map[run] * (X1*math.sinh(X0*pu) + X2) # ???
        #        else:
        #            rr = bunch_map[run] * (X0 + pu*X1 + (pu**2)*X2 + (pu**3)*X3)
        #        if rr < 0: rr = 0 # Make sure prediction is non negative
        #        predictions.append(rr)
        #        ls_error.append(0)
        #        pred_error.append(bunch_map[run]*plotter_sigmas*sigma)

        ##############################################################################################

        # Revert back to the previous convert_output setting
        self.data_parser.convert_output = prev_state
        return pred_dict

    ## NOTE1: This requires the .png file to be in the proper directory, as specified by self.group_map
    ## NOTE2: This function assumes that the sub-directory where the plots are located is named 'png'
    ##def printHtml(self,png_list,save_dir):
    ##    # type: (List[str],str) -> None
    ##    try:
    ##        htmlFile = open(save_dir+"/index.html", "wb")
    ##        htmlFile.write("<!DOCTYPE html>\n")
    ##        htmlFile.write("<html>\n")
    ##        htmlFile.write("<style>.image { float:right; margin: 5px; clear:justify; font-size: 6px; font-family: Verdana, Arial, sans-serif; text-align: center;}</style>\n")
    ##        for path_name in sorted(png_list):  # This controls the order that the images will be displayed in
    ##            file_name = "%s/png/%s.png" % (save_dir,path_name)
    ##            if os.access(file_name,os.F_OK):
    ##                htmlFile.write("<div class=image><a href=\'png/%s.png\' target='_blank'><img width=398 height=229 border=0 src=\'png/%s.png\'></a><div style=\'width:398px\'>%s</div></div>\n" % (path_name,path_name,path_name))
    ##        htmlFile.write("</html>\n")
    ##        htmlFile.close
    ##    except:
    ##        print "Unable to write index.html file"

    # For this we want to be able to specify where the images are located, relative to the index.html file
    def printHtml(self, png_list, save_dir, index_dir, png_dir="."):
        # save_dir:  The full path to the save directory
        # index_dir: The full path to the index.html file
        # png_dir:   The relative path from the index.html file to the png_dir
        try:
            htmlFile = open(index_dir + "/index.html", "wb")
            htmlFile.write("<!DOCTYPE html>\n")
            htmlFile.write("<html>\n")
            htmlFile.write(
                "<style>.image { float:left; margin: 5px; clear:justify; font-size: 6px; font-family: Verdana, Arial, sans-serif; text-align: center;}</style>\n"
            )
            for path_name in sorted(
                    png_list
            ):  # This controls the order that the images will be displayed in
                file_name = "%s/png/%s.png" % (save_dir, path_name)
                if os.access(file_name, os.F_OK):
                    rel_dir = os.path.join(png_dir, "png/%s.png" % path_name)
                    html_str = ""
                    html_str += "<div class=image>"
                    html_str += "<a href=\'%s\' target='_blank'>" % rel_dir
                    html_str += "<img width=398 height=229 border=0 src=\'%s\'>" % rel_dir
                    html_str += "</a><div style=\'width:398px\'>%s</div></div>\n" % path_name
                    htmlFile.write(html_str)
            htmlFile.write("</html>\n")
        except:
            print "Unable to write index.html file"

    # Returns {'object_name': { run_number:  ( [x_vals], [y_vals], [det_status] , [phys_status] ) } }
    def getData(self, x_vals, y_vals, det_status, phys_status, runs=None):
        # avoid a mutable default argument; None means "use every run"
        if runs is None:
            runs = []
        data = {}
        for name in self.data_parser.getNameList():
            if not data.has_key(name):
                data[name] = {}
            for run in sorted(self.data_parser.getRunsUsed()):
                if not x_vals[name].has_key(run):
                    continue
                if len(runs) > 0 and run not in runs:
                    continue
                data[name][run] = [
                    x_vals[name][run], y_vals[name][run],
                    det_status[name][run], phys_status[name][run]
                ]
        return data
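
# Usage sketch (assumption, not from the original source): a minimal
# standalone driver for the RateMonitor class above; the run number is
# illustrative, and the CLI option parsing that normally fills run_list
# is omitted.
if __name__ == "__main__":
    monitor = RateMonitor()
    monitor.run_list = [273158]  # illustrative run number
    monitor.make_fits = True
    monitor.run()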
Example #21
import pygame.camera
import time
from ImageGetter import *
from AzureRequest import *
from DataParser import *
from DataAnalyzer import *

stop = False

pygame.camera.init()
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
cam.start()
ar = AzureRequest()
data_parser = DataParser()

while not stop:
    img_getter = ImageGetter()
    capture = img_getter.get_image(cam)
    data_parser.set_data(ar.request('{url: \'http://newsrescue.com/wp-content/uploads/2015/04/happy-person.jpg\'}'))
    print data_parser.parse()
    time.sleep(3)

pygame.camera.quit()
Example #22
    def run(self, input_socket):
        f_in = input_socket.makefile()

        # amount bet after flop / 200 - percentage of hand/board
        # cut off for accepting my bets: my bets/200  - percentage of hand/board
        # preflop bet /200 - percentage of starting hand

        # global variables
        d = DataParser()
        minOppPercent = 100  # minimum percent opp goes all in on
        discardRound = 0  # 0 if preflop, 1 if preturn, 2 if preriver
        quitWhileAheadMode = False
        winMode = False
        onEdgeMode = False
        quitRank = 60  # max rank to call in quitwhileaheadmode
        myStack = 200
        myBank = 0
        handID = 1
        numHands = 1000
        bc = BetCalc()

        while True:
            data = f_in.readline().strip()
            if not data:
                print "Gameover, engine disconnected."
                break
            print data

            # QUIT WHILE YOU'RE AHEAD FUNCTION ! **************
            quitWhileAheadMode = myBank > myStack
            #winMode = myBank > 1.5*(numHands-handID)
            winMode = myBank > 10 * (numHands - handID)
            onEdgeMode = myBank < handID - numHands
            #print quitWhileAheadMode
            if winMode:
                s.send("CHECK\n")
                continue

            d.parse(data)

            word = d.word

            if word == "NEWGAME":
                myStack = d.stackSize
                numHands = d.numHands
            elif word == "NEWHAND":
                myBank = d.myBank
                startingHandRank = d.startingHandRank
                handID = d.handID
                totalOppBet = 0
                totalMyBets = 0
            elif word == "GETACTION":

                actionType = d.actionType
                handRank = d.handRank

                # opponent checked
                if actionType == "CHECK BET/RAISE":
                    if len(d.board) == 0:  # range for preflop: 78 to 26
                        bettingNums = [
                            100,  # preflop, go all in 
                            100,  # preflop, large bet/raise
                            100,  # preflop, medium bet/raise
                            52
                        ]  # preflop, small bet/raise
                    else:
                        bettingNums = [
                            101,  # postflop, go all in 
                            101,  # postflop, large bet/raise
                            80,  # postflop, medium bet/raise
                            70
                        ]  # postflop, small bet/raise
                    if (quitWhileAheadMode or onEdgeMode) and handRank < quitRank and len(d.board) == 0:
                        bet = 0
                        s.send("FOLD\n")
                    elif len(d.board) == 5 and totalOppBet <= 10 and totalMyBets <= 30:
                        bet = d.maxBet
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                        print("all in bb")
                    elif len(d.board) >= 3 and d.lastOppAction == "CHECK" and handRank < 50.0:
                        bet = d.maxBet
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                        print("all in bb")
                    elif handRank >= bettingNums[0]:  # all in
                        bet = d.maxBet
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                        print("all in bb")
                    elif handRank >= bettingNums[1]:
                        bet = bc.getBetAmount("LARGE", d.maxBet, d.minBet)
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                    elif handRank >= bettingNums[2]:
                        bet = bc.getBetAmount("MED", d.maxBet, d.minBet)
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                    elif handRank >= bettingNums[3]:
                        bet = bc.getBetAmount("SMALL", d.maxBet, d.minBet)
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                    else:
                        bet = d.minBet
                        s.send(d.betOrRaise + ":" + str(bet) + "\n")
                    totalMyBets += bet

                # discard round
                elif actionType == "CHECK DISCARD DISCARD":
                    s.send(d.shouldDiscard)

                # opponent bet and it wasn't all-in
                elif actionType == "FOLD CALL RAISE" or actionType == "FOLD CALL":
                    totalOppBet += d.oppBet

                    if len(d.board) == 0:
                        bettingNums = [
                            100,  # preflop, rank to raise on a bet
                            52,   # preflop, rank to call a bet
                            53,
                            52,
                        ]
                    else:
                        bettingNums = [
                            93,  # postflop, rank to raise on a bet
                            90,  # postflop, rank to call a bet
                            80,  # postflop, rank to call up to half the stack
                            70,  # postflop, rank to call up to a quarter stack
                        ]
                    # quit while you're ahead!
                    if (quitWhileAheadMode or onEdgeMode) and handRank < quitRank and len(d.board) == 0:
                        s.send("FOLD\n")
                    elif d.oppBet >= 150 and handRank < 99:
                        print(d.oppBet, "oppbet")
                        s.send("FOLD\n")
                    # raise!
                    elif handRank >= bettingNums[0] and actionType != "FOLD CALL":
                        raiseAmount = d.maxRaise / 2.0 + d.minRaise / 2.0  # midpoint of the legal range -- good amount?
                        s.send("RAISE:" + str(raiseAmount) + "\n")
                    # call no matter what
                    elif handRank >= bettingNums[1]:
                        print("call 1")
                        s.send("CALL\n")
                        print("handRank threshold call")
                    elif d.oppBet <= myStack / 4.0 and len(d.board) == 0:
                        print("call 2")
                        s.send("CALL\n")
                    # fold any other preflop bet
                    elif len(d.board) == 0:
                        s.send("FOLD\n")
                    # call big bet
                    elif handRank >= bettingNums[2] and d.oppBet <= myStack / 2.0:
                        print("call 3")
                        s.send("CALL\n")
                    elif handRank >= bettingNums[3] and d.oppBet <= myStack / 4.0:
                        print("call 4")
                        s.send("CALL\n")
                    else:
                        # FOLD MORE OFTEN TO DEFEAT FORD
                        # FOLD LESS OFTEN TO DEFEAT BLUFFBOT
                        # could be 3, could be 2
                        if d.oppBet <= d.handRank / 2:
                            print("betsize 1 call", d.oppBet)
                            s.send("CALL\n")
                        elif d.potSize - d.oppBet >= d.oppBet:
                            print("betsize 2 call")
                            s.send("CALL\n")
                        else:
                            s.send("FOLD\n")


            # Notes:
            # - a weak hand (~30%) that raises preflop is usually a good hand; what to do?
            # - pokerbots: protect a card for a hopeful straight (~50% chance) in the discard method
            # - also, don't raise a raise unless the hand is REAL good (>97)
            # - no big bets on the third round, though

            elif word == "HANDOVER":
                pass
                # if d.cardsShown and d.winner == d.oppName:
                #     # different amounts of overall bets; near / all-in
                #     if d.winnersPot >= 2.0 * d.stackSize - 5.0:
                #         oppPercent = d.pw.getWinPercentage()
                #         minOppPercent = min(oppPercent, minOppPercent)

            elif word == "REQUESTKEYVALUES":
                s.send("FINISH\n")
        s.close()
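
# BetCalc is not shown in these snippets. A minimal sketch of the interface the
# bot uses above -- getBetAmount(size, maxBet, minBet) -- assuming the size label
# simply picks a fraction of the legal betting range (hypothetical, not the
# original implementation):
class BetCalc:
    SIZE_FRACTIONS = {"LARGE": 0.75, "MED": 0.5, "SMALL": 0.25}

    def getBetAmount(self, size, maxBet, minBet):
        # interpolate between the minimum and maximum legal bet
        frac = self.SIZE_FRACTIONS.get(size, 0.0)
        return int(minBet + frac * (maxBet - minBet))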
Example #23
0
def runDemo():

	print("\n")
	animatedprint("Hi, welcome to our short demo.")
	printsleep(1.0)
	animatedprint("Here we'll be doing a recompile and quick runthrough of all of our code.")
	animatedprint("---------------------------------------------------------------------------\n")
	printsleep(3.0)

	mapfile = "map2.txt"
	qt_threshold = 5
	actual_to_max_children_ratio = 0.75
	max_num_adjacent = 30

	os.chdir(os.path.dirname(os.path.abspath(__file__)))
	os.chdir("QTData")
	animatedprint("We'll start by compiling CreateQT.exe...")
	os.system("gcc -std=c99 -o CreateQT CreateQT.c")
	print("Done!\n")
	printsleep(2.0)
	animatedprint("Now we create the quadtree of %s with parameters"%mapfile)
	print("\tqt_threshold = %d"%qt_threshold)
	printsleep(0.1)
	print("\tactual_to_max_children_ratio = %f"%actual_to_max_children_ratio)
	printsleep(0.1)
	print("\tmax_num_adjacent = %d"%max_num_adjacent)
	printsleep(2.0)
	os.system("CreateQT.exe {0} {1} {2} {3}".format(mapfile,qt_threshold,actual_to_max_children_ratio,max_num_adjacent))
	print("Done!\n")
	printsleep(2.0)
	os.chdir("..")

	animatedprint("Now we'll find the shortest path between all nodes...")
	printsleep(2.0)
	import QTPathFinder as qt
	pack,pdict = qt.findAllPaths()
	mngr = plt.get_current_fig_manager()
	geom = mngr.window.geometry()
	x, y, dx, dy = geom.getRect()
	mngr.window.setGeometry(100, 100, dx, dy)
	plt.show()
	plt.pause(0.5)
	print("Done!\n")
	printsleep(1.0)

	animatedprint("Now let's find some paths. Bring up the figure if you don't already see it.")
	import DataParser as dp
	import HelperMethods as hm
	#To get legal points
	limits, rectVects = dp.parseQT(justTheRectangles=True)

	xrng = limits[1]-limits[0]
	yrng = limits[3]-limits[2]
	numPaths = 150

	startPoints = [(xrng * random.random(), yrng * random.random()) for i in range(numPaths)]
	endPoints = [(xrng * random.random(), yrng * random.random()) for i in range(numPaths)]

	# drop any random point that landed inside an obstacle rectangle
	startPoints = [p for p in startPoints if not hm.isInsideRect(p, rectVects)]
	endPoints = [p for p in endPoints if not hm.isInsideRect(p, rectVects)]
	printsleep(3.0)
	animatedprint("Begin plotting...")
	printsleep(1.0)

	demoPoints = dict(zip(startPoints,endPoints))

	line = lines.Line2D([], [], lw=2, c='red')
	ax = pack[2]
	ax.add_line(line)

	for s,e in demoPoints.items():
		line = qt.plotPathWithResults(pack,pdict,s,e,line)
		print("Start: ({:f},{:f}) | End: ({:f},{:f})".format(s[0],s[1],e[0],e[1]))
		sys.stdout.flush()
		plt.show()
		plt.pause(0.001)
	print("...Done!")
	printsleep(1.0)

	animatedprint("\nThis ends the demo. Thanks for watching!")
	plt.pause(5.0)
Example #24
0
def main():
    """
	Add Event Demo
	"""
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    # Refer to the Python quickstart on how to setup the environment:
    # https://developers.google.com/google-apps/calendar/quickstart/python
    # Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any
    # stored credentials.

    event = {
        'summary': 'This is the summary',
        'location': 'Some location',
        'description': 'Description text~\nDescription',
        'start': {
            'dateTime': '2017-11-06T09:00:00+08:00',
            'timeZone': 'Asia/Taipei',
        },
        'end': {
            'dateTime': '2017-11-06T11:00:00+08:00',
            'timeZone': 'Asia/Taipei',
        },
        'recurrence': [],
        'attendees': [],
        'reminders': {
            'useDefault': False,
            'overrides': [],
        },
    }

    # get calendarId
    page_token = None
    while True:
        calendar_list = service.calendarList().list(
            pageToken=page_token).execute()
        print(calendar_list)
        for calendar_list_entry in calendar_list['items']:
            print(calendar_list_entry['summary'])
        page_token = calendar_list.get('nextPageToken')
        if not page_token:
            break

    #event = service.events().insert(calendarId='*****@*****.**', body=event).execute()
    #print(event)
    #print("Event created:{} ".format(event.get('htmlLink')))

    import DataParser
    for item in DataParser.getEvents():
        event = {
            'summary': item['country'] + "-" + item['title'],
            'description': 'Previous: {}\nForecast: {}\n'.format(item['history'], item['prediction']),
            'start': {
                'dateTime': item['datetime'],
                'timeZone': 'Asia/Taipei',
            },
            'end': {
                'dateTime': item['datetime'],
                'timeZone': 'Asia/Taipei',
            },
        }
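
        # Completing the loop (an assumption, mirroring the commented-out single
        # insert above; 'primary' is a placeholder calendarId):
        created = service.events().insert(calendarId='primary', body=event).execute()
        print("Event created: {}".format(created.get('htmlLink')))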
Example #25
0
import DataParser
import DataUtils
import NetworkAccess
import GetUrl

if __name__ == "__main__":
    CreateTable = True
    for i in range(0, 249, 25):
        url = GetUrl.Get_Data_Url(i)
        html_data = NetworkAccess.GetDataUrl(url)
        infoes = DataParser.Get_page_content(url, 0, html_data)
        if infoes == 0:
            pass
        else:
            item_list = DataUtils.ItemUtils()
            for i in range(len(infoes)):
                item_list.append(infoes[i]["name"], infoes[i]["content"], infoes[i]["score"], infoes[i]["say"])
                pass
           # item_list.clear()

            pass
        pass
    item_list.save_excel(sheet="шби", file="list.xlsx")
    item_list.save_database(CreateTable)
    pass
Example #26
0
import pygame.camera
import time
from ImageGetter import *
from AzureRequest import *
from DataParser import *
from DataAnalyzer import *

stop = False

pygame.camera.init()
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
cam.start()
ar = AzureRequest()
data_parser = DataParser()

while not stop:
    img_getter = ImageGetter()
    # a frame is captured but unused: the request below sends a fixed test image URL
    capture = img_getter.get_image(cam)
    data_parser.set_data(
        ar.request(
            '{url: \'http://newsrescue.com/wp-content/uploads/2015/04/happy-person.jpg\'}'
        ))
    print(data_parser.parse())
    time.sleep(3)

pygame.camera.quit()
Example #27
0
import DataParser as dps
import DataPloter as dpl

sFile = 'RawData.csv'
tFile = 'Preprocess.csv'
tNo = [1]
xMax = 386
yMax = 30000
sRate = 1000  # sampling rate
period = 0.2
#------------------------------------------------------------------------------------------------
if __name__ == '__main__':

    s = dps.Parser()  # new objects
    s.parseRawFile(sFile, tFile, tNo)

    l = dpl.Ploter()
    l.showSignal(tFile, xMax, yMax, sRate)
Example #28
0
import DataParser
import time
import random
import csv
from collections import Counter
import pathlib

url = 'https://your-url.com/parameters'

Data = DataParser.dataparser(url)

#Get relevant sales data from the HTML parsing
Title = Data[0]
Price = Data[1]
Size = Data[2]
Brand = Data[3]
Words = Data[4]

#write sales data to file
with open('FileName.csv', mode='a', newline='') as FileName:  # newline='' avoids blank rows on Windows
    csv_writer = csv.writer(FileName,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)

    csv_writer.writerow(Title)
    csv_writer.writerow(Price)
    csv_writer.writerow(Size)
    csv_writer.writerow(Brand)

#check to see if the keyword file exists
Example #29
0
def __init__(self):
    input_lines = DataParser.parse_csv_file()
    self.__products_storage = ProductsStorage(input_lines)
Example #30
0
import DataParser
import plotly
from plotly.graph_objs import Bar, Layout

player_data = DataParser.format_data_list(printing=False)

ratings = [float(row[-1]) for row in player_data]
quarterbacks = [row[1] for row in player_data]

plotly.offline.plot({
    "data": [Bar(x=quarterbacks, y=ratings)],
    "layout": Layout(title="Passer Ratings")
})
Example #31
0
def logic():
    # initialization
    db = DatabaseHandler()
    db.fill_database(DataParser.fetch_data())
    return
Example #32
0
import numpy as np
import DataParser as dp  # assumed alias: this fragment calls dp.read_data_from_txt
# DATA_PATH and TRAIN_PROB are used below but defined outside this fragment.

INPUT_SIZE = 1
OUTPUT_SIZE = 1
TRAIN_BATCH_SIZE = 10
CELL_NUM = 64
CELL_LAYER_NUM = None

NN_TYPE = 'LSTM'
LR = 0.01
IS_LR_DECAY = False
TRAIN_EPOCH = 1000
GLOBAL_EPOCH = 0
PRED_TIME_RANGE = 325

#-----data import----#
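
# get_interval is not defined in this fragment. A plausible sketch (an
# assumption, not the original helper): RR intervals as successive differences
# of the R-peak sample positions.
def get_interval(r_peaks):
    return np.diff(np.asarray(r_peaks).ravel())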

raw_R_peak = dp.read_data_from_txt(DATA_PATH + '103Rpeak.txt')
print(raw_R_peak.shape)
# plt.plot(raw_R_peak)
# plt.show()
time_interval = get_interval(raw_R_peak)
# time_interval = (time_interval - np.mean(time_interval)) / np.std(time_interval)
# plt.plot(time_interval)
# plt.show()
print(time_interval.shape)
train_num = int(time_interval.shape[0] * TRAIN_PROB)
# train_num = 20
test_num = int(time_interval.shape[0] * (1 - TRAIN_PROB))
train_time_interval = time_interval[:train_num]
# standardization
train_time_interval_std = (train_time_interval - np.mean(train_time_interval)) / np.std(train_time_interval)
Example #33
0

import numpy as np
from docopt import docopt
import DataParser as dp  # assumed alias: this fragment uses dp.DataParser and dp.Mode


def get_matrix(pred, gt):
    TP = np.sum(np.logical_and(pred == 2, gt == 2))
    TN = np.sum(np.logical_and(pred == 0, gt == 0))
    FP = np.sum(np.logical_and(pred == 2, gt == 0))
    FN = np.sum(np.logical_and(pred == 0, gt == 2))
    confusion_matrix = np.matrix([[TN, FP], [FN, TP]])
    return confusion_matrix
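
# Toy sanity check (made-up arrays): labels use 0 for negative and 2 for
# positive, matching the comparisons above.
# >>> get_matrix(np.array([2, 0, 2, 0]), np.array([2, 0, 0, 2]))
# matrix([[1, 1],
#         [1, 1]])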


if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    if args['tts']:
        import MLModel as ml
        parser1 = dp.DataParser(args['-d'], dp.Mode.TRAIN)
        all_data = parser1.parseData()
        X = []
        Y = []
        all_names = []
        all_hits = []
        all_samples = []
        all_valid_gt = []
        for data in all_data:
            sample_name = data['name']
            hits = data['hits']
            sample = data['sample']
            ground_truth = data['ground_truth']
            valid_gt = get_valid_gt(ground_truth, sample)
            all_names.append(sample_name)
            all_hits.append(hits)
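
# get_valid_gt is not shown in this snippet. A minimal sketch of what it might
# do, inferred only from how its result is used here and in Example #34
# ((peak_num, name, confidence) tuples, confidence 0 or 2, peak_num 1-based):
def get_valid_gt(ground_truth, sample):
    return [(peak_num, name, confidence)
            for (peak_num, name, confidence) in ground_truth
            if 0 < peak_num <= len(sample) and confidence in (0, 2)]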
Example #34
0
import numpy as np
from docopt import docopt
import DataParser as dp  # assumed alias, as in Example #33


def get_matrix(pred, gt):
    TP = np.sum(np.logical_and(pred == 2, gt == 2))
    TN = np.sum(np.logical_and(pred == 0, gt == 0))
    FP = np.sum(np.logical_and(pred == 2, gt == 0))
    FN = np.sum(np.logical_and(pred == 0, gt == 2))
    confusion_matrix = np.matrix([[TN, FP], [FN, TP]])
    return confusion_matrix

if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    #print(args)
    if args['ml']:
        import MLModel as ml
        if args['train']:
            parser = dp.DataParser(args['-d'], dp.Mode.TRAIN)
            all_data = parser.parseData()
            X = []
            Y = []
            for data in all_data:
                sample_name = data['name']
                hits = data['hits']
                sample = data['sample']
                ground_truth = data['ground_truth']
                valid_gt = get_valid_gt(ground_truth, sample)
                for (peak_num, name, confidence) in valid_gt:
                    if confidence == 2:
                        gt = [0, 1]
                    elif confidence == 0:
                        gt = [1, 0]
                    sample_spectrum = sample[peak_num - 1]['spectrum']
Example #35
0
import os
import sys

import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

dirname = os.path.dirname(__file__)
sys.path.append("../data_parser")
import DataParser

#import data
subjects = [
    "Religion", "Sinhala", "English", "Mathematics", "Science", "History",
    "Geography", "Citizenship Education", "Health", "Tamil", "Art", "PTS"
]
marks_all = DataParser.get_marks(subjects, index='no')

#handle missing values
marks_all = DataParser.handle_missing_values(marks_all, how='-1', is_nan=True)
marks_all = DataParser.handle_missing_values(marks_all, how='-1', is_nan=False)
marks_all = marks_all.replace({-1: np.nan})
marks_all = marks_all.fillna(method='bfill', axis=1)
marks_all = marks_all.fillna(method='ffill', axis=1)
marks_all = marks_all.dropna(axis=0)
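# Toy illustration (made-up row) of the gap-filling above: back-fill first,
# then forward-fill catches any trailing gaps along each row.
# >>> pd.DataFrame([[np.nan, 40.0, np.nan, 70.0]]).fillna(method='bfill', axis=1)
#       0     1     2     3
# 0  40.0  40.0  70.0  70.0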
"""
PCA
"""
col_all = marks_all.columns
principalDf = pd.DataFrame()
subs_ex_ratios = []
for sub_year in range(int(len(col_all) / 3)):
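    # The loop body is cut off in this snippet. A sketch of one plausible step
    # (an assumption, not the original code): reduce each group of three
    # term-mark columns to a single principal component and record its
    # explained-variance ratio.
    cols = col_all[sub_year * 3:(sub_year + 1) * 3]
    scaled = StandardScaler().fit_transform(marks_all[cols])
    pca = PCA(n_components=1)
    principalDf[str(sub_year)] = pca.fit_transform(scaled).ravel()
    subs_ex_ratios.append(pca.explained_variance_ratio_[0])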
Example #36
0
    def run(self, input_socket):
        f_in = input_socket.makefile()
        s = input_socket  # assumption: 's' below is the raw socket used for send()

        # amount bet after flop / 200 - percentage of hand/board
        # cut off for accepting my bets: my bets/200  - percentage of hand/board
        # preflop bet /200 - percentage of starting hand

        # global variables
        d = DataParser()
        minOppPercent = 100  # minimum percentage the opp goes all in on
        discardRound = 0  # 0 if preflop, 1 if preturn, 2 if preriver
        quitWhileAheadMode = False
        winMode = False
        onEdgeMode = False
        quitRank = 52 # max rank to call in quitwhileaheadmode
        myStack = 200
        myBank = 0
        handID = 1
        numHands = 1000
        bc = BetCalc()

        
        while True:
            data = f_in.readline().strip()
            if not data:
                print "Gameover, engine disconnected."
                break
            print data

            d.parse(data)

            word = d.word

            if word == "NEWGAME":
                myStack = d.stackSize
                numHands = d.numHands
            elif word == "NEWHAND":
                myBank = d.myBank
                startingHandRank = d.startingHandRank
                handID = d.handID
            elif word == "GETACTION":

                actionType = d.actionType
                handRank = d.handRank

                # opponent checked
                if actionType == "CHECK BET/RAISE":  # starting round: open by shoving the full 200 stack
                    s.send("RAISE:200\n")

                # discard round
                elif actionType == "CHECK DISCARD DISCARD":
                    s.send(d.shouldDiscard)

                # opponent bet and it wasn't all-in
                elif actionType == "FOLD CALL RAISE" or actionType == "FOLD CALL":
                    # facing a bet: always call
                    s.send("CALL\n")

            # Notes:
            # - a weak hand (~30%) that raises preflop is usually a good hand; what to do?
            # - pokerbots: protect a card for a hopeful straight (~50% chance) in the discard method
            # - also, don't raise a raise unless the hand is REAL good (>97)
            # - no big bets on the third round, though

            elif word == "HANDOVER":
                pass
               # if d.cardsShown and d.winner == d.oppName:
                    # differnt amounts of overall bets
                    # near / all-in
                #    if d.winnersPot >= 2.0 * d.stackSize - 5.0:
                 #       oppPercent = d.pw.getWinPercentage()
                  #      minOppPercent = min(oppPercent,minOppPercent)

            elif word == "REQUESTKEYVALUES":
                s.send("FINISH\n")
        s.close()