Code example #1
def infer_process(exe, program, reader, fetch_vars, dataset):
    """
    the function to execute the infer process
    :param exe: the fluid Executor
    :param program: the infer_program
    :param reader: data reader
    :param fetch_vars: the variables to fetch from the program
    :param dataset: the dataset used to parse the prediction into readable results
    :return: the list of prediction results
    """
    def input_check(data):
        # An empty batch (total sequence length 0) cannot be fed to the
        # executor; return the raw tensor so the caller can short-circuit.
        if data[0]['words'].lod()[0][-1] == 0:
            return data[0]['words']
        return None

    results = []
    for data in reader():
        crf_decode = input_check(data)
        if crf_decode is not None:
            results += utils.parse_result(crf_decode, crf_decode, dataset)
            continue

        words, crf_decode = exe.run(
            program,
            fetch_list=fetch_vars,
            feed=data,
            return_numpy=False,
            use_program_cache=True,
        )
        results += utils.parse_result(words, crf_decode, dataset)
    return results
Code example #2
def test_process(exe, program, reader, test_ret):
    """
    the function to execute the infer process
    :param exe: the fluid Executor
    :param program: the infer_program
    :param reader: data reader
    :return: the list of prediction result
    """
    test_ret["chunk_evaluator"].reset()

    start_time = time.time()
    for data in reader():

        nums_infer, nums_label, nums_correct = exe.run(
            program,
            fetch_list=[
                test_ret["num_infer_chunks"],
                test_ret["num_label_chunks"],
                test_ret["num_correct_chunks"],
            ],
            feed=data,
        )

        test_ret["chunk_evaluator"].update(nums_infer, nums_label,
                                           nums_correct)
    precision, recall, f1 = test_ret["chunk_evaluator"].eval()
    end_time = time.time()
    print("[test] P: %.5f, R: %.5f, F1: %.5f, elapsed time: %.3f s" %
          (precision, recall, f1, end_time - start_time))
    return eval("%.5f" % f1)
Code example #3
def main_fight(accuracyfight=0.91):
    with graph.as_default():
        np.random.seed(1234)
        model22 = fight(tf)

        # Read the video and shape it into the (batch, frames, height, width,
        # channels) tensor the model expects.
        vid = reader(cv2, "hdfight.mp4")
        datav = np.zeros((1, 30, 160, 160, 3), dtype=float)  # np.float is deprecated; it aliased the builtin float
        datav[0] = vid
        millis = int(round(time.time() * 1000))

        # Note: the accuracyfight parameter is unused here; the threshold is
        # hardcoded ('acuracy' matches pred_fight's keyword spelling).
        f, precent = pred_fight(model22, datav, acuracy=0.65)

        aman = {'fight': f, 'percentegeoffight': str(precent)}

        millis2 = int(round(time.time() * 1000))
        aman['processing_time'] = str(millis2 - millis)
        resnd = jsonify(aman)
        resnd.status_code = 200
        print('value is:', aman)
        return resnd
Code example #4
File: dataIO.py (project: Meteodan/research)
    def open_file(self, file_name, mode=None, format='nc'):
        """
        open_file() [public]
        Purpose:    Opens a file with the given mode.
        Parameters:    file_name [type=string]
                        Name of the file to open.
                    mode [type=string]
                        How to open the file (e.g. 'r' for reading, 'rw' for reading and writing, etc.)
                        Passing None defaults to opening for reading only.
                    format [type=string]
                        File format passed to the reader (defaults to 'nc').
        Returns:    [nothing]
        """
        # Set the mode default
        if mode is None: mode = 'r'

        # If the file is already open, close it.
        if self._df is not None:
            self.close()

        self._file_name = file_name
        self._format = format

        # Figure out whether we're reading, writing, or both.
        self._read = (mode.find('r') > -1)
        self._write = (mode.find('w') > -1 or mode == 'r+')

        #       if self._write:
        #           warning("DataIO: Writing is currently not supported.  Opening as read-only.")
        #           self._write = False

        if reader.__name__ == "Nio":
            # If the reader is PyNIO open it with PyNIO's open command
            self._df = reader.open_file(file_name, mode=mode, format=format)
        elif reader.__name__ == "scipy.io.netcdf":
            # If the reader is scipy, open it with scipy's open command
            if self._read:
                self._df = reader.netcdf_file(file_name, 'r')
            elif self._write:
                self._df = reader.netcdf_file(file_name, 'w')
        elif reader.__name__ == "Dataset":
            # If the reader is netCDF4, open it with that open command
            if self._read:
                self._df = reader(file_name, mode='r')
            elif self._write:
                self._df = reader(file_name, mode='a')
        return
Code example #5
File: dataIO.py (project: pulsatrixwx/PulsatrixWx)
    def open_file(self, file_name, mode=None):
        """
        open_file() [public]
        Purpose:    Opens a file with the given mode.
        Parameters:    file_name [type=string]
                        Name of the file to open.
                    mode [type=string]
                        How to open the file (e.g. 'r' for reading, 'rw' for reading and writing, etc.)
                        Passing None defaults to opening for reading only.
        Returns:    [nothing]
        """
        # Set the mode default
        if mode is None: mode = 'r'

        # If the file is already open, close it.
        if self._df is not None:
            self.close()

        self._file_name = file_name

        # Figure out whether we're reading, writing, or both.
        self._read = (mode.find('r') > -1)
        self._write = (mode.find('w') > -1 or mode == 'r+')

#       if self._write:
#           warning("DataIO: Writing is currently not supported.  Opening as read-only.")
#           self._write = False

        if reader.__name__ == "Nio":
            # If the reader is PyNIO open it with PyNIO's open command
            self._df = reader.open_file(file_name, mode=mode)
        elif reader.__name__ == "scipy.io.netcdf":
            # If the reader is scipy, open it with scipy's open command
            if self._read:
                self._df = reader.netcdf_file(file_name, 'r')
            elif self._write:
                self._df = reader.netcdf_file(file_name, 'w')
        elif reader.__name__ == "Dataset":
            # If the reader is netCDF4, open it with that open command
            if self._read:
                self._df = reader(file_name, mode='r')
            elif self._write:
                self._df = reader(file_name, mode='a')
        return
Code example #6
def get_batch(reader, batch_size):
    example_batch = []
    label_batch = []
    for example, label in reader():
        example_batch.append(example)
        label_batch.append(label)
        if len(example_batch) >= batch_size:
            yield example_batch, label_batch
            example_batch = []
            label_batch = []
    # Note: a final partial batch (fewer than batch_size items) is dropped.
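For illustration, a minimal sketch of driving get_batch with a hypothetical toy reader (the reader name and data below are made up):

def toy_reader():
    for i in range(5):
        yield i, i % 2  # (example, label) pairs

for examples, labels in get_batch(toy_reader, batch_size=2):
    print(examples, labels)
# [0, 1] [0, 1]
# [2, 3] [0, 1]
# the final item (4) is dropped because it does not fill a batch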
Code example #7
def get_train_multiprocess_reader(reader, datasets, process_num=10):
    # Shard the datasets round-robin across at most process_num groups.
    groups = []
    process_num = min(process_num, len(datasets))
    for i in range(process_num):
        groups.append(datasets[i::process_num])

    # Build one reader per shard and merge them into a single multiprocess reader.
    readers = []
    for group in groups:
        readers.append(reader(group))

    return paddle.reader.multiprocess_reader(readers, use_pipe=True)
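As a quick illustration of the datasets[i::process_num] round-robin sharding used above (toy data):

datasets = ['a', 'b', 'c', 'd', 'e']
process_num = 2
print(datasets[0::process_num])  # ['a', 'c', 'e']
print(datasets[1::process_num])  # ['b', 'd']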
Code example #8
def get_batch(reader, batch_size):
    # One list per field: FLAGS.slot_nums slots plus two extra fields.
    example_batch = []
    for _ in range(FLAGS.slot_nums + 2):
        example_batch.append([])
    for example in reader():
        for i in range(len(example)):
            example_batch[i].append(example[i])
        if len(example_batch[0]) >= batch_size:
            yield example_batch
            # Rebuild the batch rather than clearing it in place, so the
            # list already yielded to the caller is not mutated afterwards.
            example_batch = [[] for _ in range(FLAGS.slot_nums + 2)]
Code example #9
def gen_cell_meta_valuetest(path):
    scan_result_list = []
    files = os.listdir(path)
    for file in files:
        file_name = os.path.splitext(file)[0]
        print(file_name)
        if file_name.count('cell_decision') == 0:
            continue  # skip files that are not cell_decision results
        # Open the actual file (with its extension) under path, not the bare stem.
        with open(os.path.join(path, file), "rb") as f:
            for scan_result in reader(f):
                scan_result_list.append(scan_result)
    return scan_result_list
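reader here is presumably an Avro reader such as fastavro's; a minimal self-contained sketch under that assumption (the file name is hypothetical):

from fastavro import reader

with open("cell_decision_scan.avro", "rb") as f:  # hypothetical file
    for record in reader(f):
        print(record)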
Code example #10
class Fib(object):
    # //users.avro
    avro_path = "/home/jiayachong/avrome/util/datacreated/users.avro"

    def __call__(self, num, *args, **kwargs):
        n, m, lst = 0, 1, []

        for i in range(num):
            lst.append(n)
            n, m = m, n + m

        return lst

    # Note: the code below runs once, at class-definition time, not per instance.
    if os.path.isdir(avro_path):
        # A directory: read every .avro file inside it.
        filenames = glob.glob(avro_path + '/*.avro')
    else:
        # A single file: read it directly.
        filenames = [avro_path]

    scan_result_list = []
    for filename in filenames:
        with open(filename, 'rb') as f:
            for scan_result in reader(f):
                print(scan_result)
                scan_result_list.append(scan_result)
Code example #11
File: randTest.py (project: houstonst/lin-kernighan)
def randTest():
    #clear the console ('cls' is Windows-only; use 'clear' on Unix-like systems)
    clear = lambda: os.system('cls')
    clear()

    #accept input
    print("Enter a .csv or .txt file [example.csv or example.txt]:\n")
    inp = input()
    filepath = "./tests/" + inp
    clear()

    print("Enter maximum number of random tours to improve:\n")
    rawCap = input()
    cap = int(rawCap)
    clear()

    print("Enter increment value for number of random tours to improve:\n")
    rawInc = input()
    inc = int(rawInc)
    clear()

    print("Choose a solmax:\n")
    rawsol = input()
    solmax = int(rawsol)
    clear()

    #import after accepting input or else GUI runs annoyingly
    from lk import lin
    import staticVars as sv
    from lkUtils import calculate, stringify, sleeper
    from genAlgos.rand import randomTour

    #form GUI and weighted graph
    sv.cityNames, sv.rawCoords, sv.guiCoords = reader(filepath, sv.height,
                                                      sv.width)
    sv.wg = weightedGraph(sv.rawCoords)

    #iterate through random tours
    bestCost = float("inf")
    for iteration in range(1, cap + 1, inc):
        print("<<< {} RANDOM TOUR(S) >>>".format(iteration))
        for i in range(iteration):
            print("-running iteration {}".format(i + 1))
            randTour, randCost = randomTour(sv.rawCoords, sv.cityNames, False)
            cost = lin(randTour, randCost, solmax, 0, "test2", None, None)
            if cost < bestCost:
                bestCost = cost
        print("-best cost: {}\n".format(bestCost))
Code example #12
        def test_process(reader, chunk_evaluator):
            start_time = time.time()
            for batch in reader():
                words, targets, length = batch
                crf_decode = model(words, length=length)
                # The per-batch precision/recall/f1_score returned here are
                # unused; the final metrics come from chunk_evaluator.eval().
                (precision, recall, f1_score, num_infer_chunks,
                 num_label_chunks,
                 num_correct_chunks) = chunk_eval(input=crf_decode,
                                                  label=targets,
                                                  seq_length=length)
                chunk_evaluator.update(num_infer_chunks.numpy(),
                                       num_label_chunks.numpy(),
                                       num_correct_chunks.numpy())

            precision, recall, f1 = chunk_evaluator.eval()
            end_time = time.time()
            print("[test] P: %.5f, R: %.5f, F1: %.5f, elapsed time: %.3f s" %
                  (precision, recall, f1, end_time - start_time))
Code example #13
File: train.py (project: zxy3/data_enhance)
def eval_net(reader, model):
    acc_set = []

    for batch_id, data in enumerate(reader()):
        dy_x_data = np.array([x[0] for x in data]).astype('float32')
        y_data = np.array([x[1] for x in data]).astype('int64')  # fluid expects int64 labels
        y_data = y_data[:, np.newaxis]
        img = fluid.dygraph.to_variable(dy_x_data)
        label = fluid.dygraph.to_variable(y_data)
        label.stop_gradient = True
        prediction, acc = model(img, label)

        acc_set.append(float(acc.numpy()))

    # mean test accuracy over all batches
    acc_val_mean = np.array(acc_set).mean()

    return acc_val_mean
Code example #14
def __impl__():
    res = []
    for item in reader():
        res.append(item)
        if len(res) == count:
            yield res
            res = []
    if len(res) == count:
        yield res
    elif not clip_last:
        # Flatten the leftover items and, if there are enough, redistribute
        # them evenly into count parts.
        data = []
        for item in res:
            data += item
        if len(data) > count:
            inst_num_per_part = len(data) // count
            yield [
                data[inst_num_per_part * i:inst_num_per_part * (i + 1)]
                for i in range(count)
            ]
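For context, __impl__ reads reader, count, and clip_last from an enclosing scope. A minimal self-contained sketch of such a factory (the factory name is hypothetical and the tail handling is simplified):

def make_parts_reader(reader, count, clip_last=True):
    def __impl__():
        res = []
        for item in reader():
            res.append(item)
            if len(res) == count:
                yield res
                res = []
        if res and not clip_last:
            yield res  # simplified: yield the partial tail as-is
    return __impl__

# Toy usage: each item is a single-element list.
def toy_reader():
    for i in range(7):
        yield [i]

for part in make_parts_reader(toy_reader, count=3, clip_last=False)():
    print(part)
# [[0], [1], [2]]
# [[3], [4], [5]]
# [[6]]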
Code example #15
    def train_loop(exe, trainer_prog, trainer_id=0, reader=train_reader):
        embedding_name = 'emb'
        embedding_param = fluid.global_scope().find_var(
            embedding_name).get_tensor()
        embedding_param.set(word_vector_values, place)

        batch_id = 0
        for pass_id in range(num_passes):  # xrange in the original Python 2 code
            chunk_evaluator.reset(exe)
            start_time = time.time()
            with profiler.profiler(
                    "CPU", 'total',
                    profile_path="/usr/local/nvidia/lib64/tmp") as prof:
                for data in reader():
                    cost, batch_precision, batch_recall, batch_f1_score = exe.run(
                        trainer_prog,
                        feed=feeder.feed(data),
                        fetch_list=[avg_cost] + chunk_evaluator.metrics)
                    if batch_id % 5 == 0:
                        print("Pass " + str(pass_id) + ", Batch " +
                              str(batch_id) + ", Cost " + str(cost[0]) +
                              ", Precision " + str(batch_precision[0]) +
                              ", Recall " + str(batch_recall[0]) +
                              ", F1_score" + str(batch_f1_score[0]))
                    batch_id = batch_id + 1

                pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval(
                    exe)
                spent = time.time() - start_time
                print("pass_id: %d, precision: %f, recall: %f, f1: %f, spent: %f, speed: %f" % \
                      (pass_id, pass_precision, pass_recall, pass_f1_score,
                      spent, 14987.0 / spent))
                pass_precision, pass_recall, pass_f1_score = test(
                    exe, chunk_evaluator, inference_program, test_reader,
                    place)
                print("[TestSet] pass_id:" + str(pass_id) +
                      " pass_precision:" + str(pass_precision) +
                      " pass_recall:" + str(pass_recall) + " pass_f1_score:" +
                      str(pass_f1_score))
Code example #16
File: genTest.py (project: houstonst/lin-kernighan)
def genTest():
    #import after accepting input or else GUI runs annoyingly
    from lk import lin
    import staticVars as sv
    from genAlgos.fi import farthestInsertion
    from genAlgos.nn import nearestNeighbor
    from lkUtils import calculate, stringify, sleeper
    from genAlgos.rand import randomTour

    #iterate through files and run lin-kernighan
    files = [
        "6.csv", "11.csv", "14.csv", "26.csv", "29.csv", "48.csv", "52.csv",
        "76.csv", "100.csv", "105.csv", "107.csv", "120.csv", "152.txt",
        "195.csv", "200.txt", "225.txt", "299.txt", "318.txt", "439.txt",
        "575.txt"
    ]
    algos = ["nearest", "farthest", "random"]
    for solmax in range(1, 10, 2):
        for j in range(len(files)):

            #execute
            print("<<< TESTING {} ON SOLMAX {} >>>".format(files[j], solmax))
            filepath = "./tests/" + files[j]
            for k in range(len(algos)):
                #data header
                print("{}:".format(algos[k]))

                #form GUI and weighted graph
                sv.cityNames, sv.rawCoords, sv.guiCoords = reader(
                    filepath, sv.height, sv.width)
                sv.wg = weightedGraph(sv.rawCoords)

                #create initial tour
                start = time.time()
                tour = None
                cost = None
                if algos[k] == "farthest" and files[j] == '575.txt':
                    tour = [
                        23, 22, 21, 44, 20, 43, 19, 42, 41, 18, 17, 16, 40, 39,
                        15, 14, 12, 13, 35, 58, 36, 59, 82, 60, 37, 38, 61, 62,
                        63, 64, 86, 83, 84, 85, 110, 109, 131, 108, 107, 129,
                        106, 81, 104, 105, 128, 130, 153, 152, 151, 150, 149,
                        127, 148, 126, 125, 124, 123, 101, 103, 102, 80, 79,
                        78, 77, 76, 100, 99, 98, 75, 97, 96, 74, 73, 51, 50,
                        28, 29, 52, 53, 31, 54, 55, 32, 56, 57, 33, 34, 11, 10,
                        9, 7, 8, 30, 6, 4, 5, 27, 26, 3, 2, 1, 25, 24, 47, 48,
                        49, 72, 70, 71, 93, 94, 95, 117, 116, 139, 141, 118,
                        119, 120, 142, 143, 144, 121, 122, 147, 146, 170, 169,
                        168, 145, 167, 190, 166, 165, 164, 163, 140, 162, 185,
                        186, 209, 208, 231, 232, 233, 210, 211, 187, 188, 189,
                        212, 213, 236, 235, 237, 214, 215, 238, 239, 240, 241,
                        218, 217, 216, 191, 192, 193, 195, 194, 171, 172, 173,
                        174, 175, 196, 197, 219, 220, 221, 198, 199, 200, 222,
                        224, 223, 246, 247, 248, 272, 271, 270, 292, 269, 268,
                        267, 245, 244, 243, 242, 265, 266, 290, 289, 288, 264,
                        263, 262, 286, 287, 309, 310, 311, 312, 334, 335, 358,
                        357, 356, 333, 355, 354, 353, 352, 329, 331, 332, 308,
                        330, 307, 305, 328, 351, 350, 326, 327, 303, 304, 281,
                        283, 306, 284, 285, 261, 260, 282, 259, 258, 234, 256,
                        254, 255, 277, 257, 280, 279, 278, 302, 301, 300, 325,
                        324, 323, 346, 347, 348, 349, 371, 370, 372, 395, 394,
                        393, 369, 392, 416, 417, 415, 438, 439, 440, 442, 441,
                        420, 421, 419, 418, 396, 397, 375, 373, 374, 376, 377,
                        400, 399, 398, 422, 423, 424, 425, 401, 378, 379, 380,
                        381, 382, 405, 404, 403, 402, 426, 449, 450, 472, 471,
                        448, 447, 446, 470, 469, 491, 468, 445, 444, 443, 467,
                        466, 490, 489, 512, 511, 488, 465, 464, 486, 463, 462,
                        461, 484, 485, 487, 510, 509, 508, 507, 530, 553, 554,
                        555, 531, 532, 556, 533, 534, 535, 557, 558, 559, 560,
                        536, 513, 537, 561, 562, 539, 538, 515, 514, 492, 516,
                        493, 494, 517, 540, 541, 518, 495, 496, 519, 520, 544,
                        543, 542, 565, 564, 563, 566, 567, 545, 568, 546, 569,
                        571, 573, 572, 570, 547, 548, 524, 525, 526, 527, 549,
                        550, 551, 574, 0, 552, 528, 529, 506, 483, 482, 460,
                        459, 458, 435, 436, 434, 433, 432, 456, 457, 479, 455,
                        454, 453, 476, 477, 478, 480, 481, 505, 504, 503, 502,
                        501, 500, 523, 522, 521, 499, 498, 497, 475, 474, 473,
                        452, 451, 427, 428, 429, 406, 407, 430, 431, 408, 409,
                        387, 410, 388, 411, 412, 389, 413, 414, 437, 391, 390,
                        367, 368, 366, 342, 365, 341, 364, 386, 363, 361, 385,
                        384, 383, 359, 360, 337, 336, 313, 314, 291, 293, 315,
                        316, 339, 338, 362, 340, 317, 318, 294, 295, 296, 297,
                        320, 319, 343, 344, 345, 322, 321, 298, 299, 276, 275,
                        274, 273, 250, 251, 252, 253, 230, 229, 228, 227, 249,
                        225, 226, 202, 203, 179, 201, 177, 176, 178, 154, 155,
                        132, 133, 156, 157, 180, 182, 181, 204, 205, 206, 207,
                        184, 183, 160, 161, 137, 159, 158, 136, 135, 134, 111,
                        112, 113, 114, 138, 115, 91, 92, 90, 89, 88, 87, 65,
                        66, 67, 68, 69, 45, 46, 23
                    ]
                    cost = 7542.52
                elif algos[k] == "farthest":
                    tour, cost = farthestInsertion(sv.rawCoords, sv.guiCoords,
                                                   sv.cityNames, sv.height,
                                                   sv.width, "2", False)
                elif algos[k] == "nearest":
                    tour, cost = nearestNeighbor(sv.rawCoords, sv.guiCoords,
                                                 sv.cityNames, sv.height,
                                                 sv.width, "2", False)
                else:
                    tour, cost = randomTour(sv.rawCoords, sv.cityNames, False)

                #print tour info
                end = time.time()
                runtime = end - start

                #run lin-kernighan
                lin(tour, cost, solmax, runtime, "test1", None, None)

            if algos[k] == "nearest" or algos[k] == "farthest":
                print("\n")
Code example #17
def solve():
    #clear the console ('cls' is Windows-only; use 'clear' on Unix-like systems)
    clear = lambda: os.system('cls')
    clear()

    #accept input
    print("Enter a .csv or .txt file [example.csv or example.txt]:\n")
    inp = input()
    filepath = "./tests/" + inp
    clear()

    print("Pick a path generation algorithm (random by default):")
    print("""-farthest: Farthest Insertion
-nearest: Nearest Neighbor
-fixed: Fixed Tour defined in code\n""")
    algo = input()
    clear()

    print("Choose a solmax:\n")
    rawsol = input()
    solmax = int(rawsol)
    clear()

    #import after accepting input or else GUI runs annoyingly
    from lk import lin
    import staticVars as sv
    from genAlgos.fi import farthestInsertion
    from genAlgos.nn import nearestNeighbor
    from lkUtils import calculate, stringify
    from genAlgos.rand import randomTour

    #form GUI and weighted graph
    sv.cityNames, sv.rawCoords, sv.guiCoords = reader(filepath, sv.height,
                                                      sv.width)
    sv.wg = weightedGraph(sv.rawCoords)

    #create initial tour
    start = time.time()
    tour = None
    cost = None
    if algo == "farthest":
        tour, cost = farthestInsertion(sv.rawCoords, sv.guiCoords,
                                       sv.cityNames, sv.height, sv.width, "2",
                                       False)
    elif algo == "nearest":
        tour, cost = nearestNeighbor(sv.rawCoords, sv.guiCoords, sv.cityNames,
                                     sv.height, sv.width, "2", False)
    elif algo == "fixed":
        tour = [
            0, 5, 42, 24, 10, 45, 35, 4, 26, 2, 29, 34, 41, 16, 22, 3, 23, 14,
            25, 13, 11, 12, 15, 40, 9, 1, 8, 38, 31, 44, 18, 7, 28, 6, 37, 19,
            27, 17, 43, 30, 36, 46, 33, 20, 47, 21, 32, 39, 0
        ]
        cost = calculate(tour)
    else:
        tour, cost = randomTour(sv.rawCoords, sv.cityNames, False)

    #print tour info
    end = time.time()
    runtime = end - start

    #run gui
    root = Tk()
    root.title("Lin-Kernighan")
    root.iconbitmap('./graphics/favicon.ico')
    wndw = Canvas(root, width=sv.width, height=sv.height)
    wndw.configure(bg="white")
    wndw.pack(expand=YES, fill=BOTH)

    #run lin-kernighan
    print("<<< RUNNING LIN-KERNIGHAN ON PROBLEM {} >>>".format(inp))
    lin(tour, cost, solmax, runtime, None, root, wndw)
Code example #18
def gen():
    for i, data in enumerate(reader()):
        imgs = np.float32([item[0] for item in data])
        yield {"x": imgs}
Code example #19
In [22]: from csv import reader

In [23]: total_sales = 0.0  # the salesReceipt namedtuple is assumed defined in an earlier cell
    ...: with open("sales_record.csv", "r") as input_file:
    ...:     csv_fields = reader(input_file)
    ...:     for field_list in csv_fields:
    ...:         store_record = salesReceipt._make(field_list)
    ...:         total_sales += float(store_record.saleAmount)
    ...:         

In [24]: print("Total sales = ", total_sales)
Total sales =  105.97
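The session above assumes salesReceipt and total_sales were set up in earlier cells; a self-contained variant might look like this (the field names are assumptions for illustration):

from collections import namedtuple
from csv import reader

# Hypothetical field layout for the receipt rows.
salesReceipt = namedtuple('salesReceipt', ['storeName', 'saleAmount'])

total_sales = 0.0
with open("sales_record.csv", "r") as input_file:
    for field_list in reader(input_file):
        record = salesReceipt._make(field_list)
        total_sales += float(record.saleAmount)

print("Total sales = ", total_sales)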
Code example #20
    # (the if-branch that assigns `key` is elided from this excerpt)
    else:
        key = ''

    if key != '':
        d = input('delete string? ')
        if d != '':
            gk.delete(d, key)

print('\nGet events from ics-file (timeedit.ics)\n')
go = input("y/n? ")
if go == 'y':
    ics = icskalenteri()
    ics.get()
    ics.show()

print(
    '\nClean summaries from ics (based on courses.txt) and add stamp to description\n'
)
go = input("y/n? ")
if go == 'y':
    r = reader()
    d = r.get()
    # note: `ics` exists only if the earlier ics prompt was also answered 'y'
    ics.fix_events_summary(d)
    ics.add_events_description_stamp()
    ics.show()

print('\nAdd ics-events to google calendar\n')
go = input("y/n? ")
if go == 'y':
    gk.add(ics.events)
Code example #21
File: dataIO.py (project: Meteodan/research)
    def close(self, out_file_name=''):
        """
        close() [public]
        Purpose:    Closes the file and resets the class to its initial state.  The file is set for writing, and any changes have occurred, create a 
                        new file and copy all the variables over, without anything that's been "removed."
        Parameters: [none]
        Returns:    [nothing]
        """
        self._attempted_close = True
        if self._write:
            # Copy the data to another file, excluding the removed variables ...
            tmp_file_name = "%s.tmp" % self._file_name
            tmp_file = None
            if reader.__name__ == "Nio":
                tmp_file = reader.open_file(tmp_file_name,
                                            mode='w',
                                            format=self._format)
            elif reader.__name__ == "scipy.io.netcdf":
                tmp_file = reader.netcdf_file(tmp_file_name, mode='w')
            elif reader.__name__ == "Dataset":
                tmp_file = reader(tmp_file_name, 'w')

            # Copy dimensions over
            for dimension, length in self._df.dimensions.iteritems():
                if reader.__name__ == "Nio":
                    tmp_file.create_dimension(dimension, length)
                elif reader.__name__ in ["scipy.io.netcdf", "Dataset"]:
                    tmp_file.createDimension(dimension, len(length))

            # Create new dimensions added by splicing
            for variable, splice_info in self._sp_variables.iteritems():
                dim_names, index_lists = splice_info
                for dim_name, indexes in zip(dim_names, index_lists):
                    if dim_name not in tmp_file.dimensions.keys():
                        if reader.__name__ == "Nio":
                            tmp_file.create_dimension(dim_name,
                                                      len(np.unique(indexes)))
                        elif reader.__name__ in ["scipy.io.netcdf", "Dataset"]:
                            tmp_file.createDimension(dim_name,
                                                     len(np.unique(indexes)))

            # Copy variable definitions over
            for variable, data in self._df.variables.iteritems():
                if variable not in self._rm_variables:
                    # If this variable is going to be spliced, find its new dimensions
                    if variable not in self._sp_variables:
                        dim_names = data.dimensions
                    else:
                        dim_names, index_lists = self._sp_variables[variable]

                    if reader.__name__ == "Nio":
                        tmp_file.create_variable(variable, data.typecode(),
                                                 dim_names)
                    elif reader.__name__ in ["scipy.io.netcdf", "Dataset"]:
                        tmp_file.createVariable(variable, data.dtype,
                                                dim_names)

            # Copy file attributes over
            for attribute in dir(self._df):
                if attribute[:2] != "__" and not hasattr(
                        tmp_file, attribute
                ) and attribute not in self._rm_attributes['__file__']:
                    setattr(tmp_file, attribute, getattr(self._df, attribute))

            # Copy variable attributes over
            for variable, data in tmp_file.variables.iteritems():
                try:
                    for attribute in dir(self._df.variables[variable]):
                        if not hasattr(
                                data, attribute
                        ) and attribute not in self._rm_attributes[variable]:
                            setattr(
                                data, attribute,
                                getattr(self._df.variables[variable],
                                        attribute))
                except KeyError:
                    for attribute in dir(self._df.variables[variable]):
                        if not hasattr(data, attribute):
                            setattr(
                                data, attribute,
                                getattr(self._df.variables[variable],
                                        attribute))

            # Copy variable data over (NetCDF4 Python appears to require this be done after setting all the variable attributes)
            for variable, data in self._df.variables.iteritems():
                if variable not in self._rm_variables:
                    tmp_variable = np.copy(self._df.variables[variable][:])

                    # Overwrite the data if we need to ...
                    if variable in self._ov_variables:
                        tmp_variable = np.copy(self._ov_variables[variable])

                    # Do splicing ...
                    if variable in self._sp_variables:
                        dim_names, index_lists = self._sp_variables[variable]
                        tmp_variable = np.copy(
                            self._df.variables[variable][np.ix_(*index_lists)])

                    # Put it in the file ...
                    tmp_file.variables[variable][:] = tmp_variable


#           for dimension, length in tmp_file.dimensions.iteritems():
#               print dimension, length

        self._df.close()

        if self._write:
            # Move temporary file to original file's location
            import shutil
            if len(out_file_name) > 0:
                shutil.move(tmp_file_name, out_file_name)
            else:
                shutil.move(tmp_file_name, self._file_name)

        self._df = None
        self._file_name = ""
        self._format = ""
        self._read = False
        self._write = False
        self._rm_variables = []
        self._sp_variables = {}
        self._rm_attributes = {'__file__': []}
        return
Code example #22
File: dataIO.py (project: pulsatrixwx/PulsatrixWx)
    def close(self, out_file_name=''):
        """
        close() [public]
        Purpose:    Closes the file and resets the class to its initial state.  If the file is set for writing and any changes
                        have occurred, create a new file and copy all the variables over, without anything that's been "removed."
        Parameters: out_file_name [type=string]
                        Optional name for the rewritten file; defaults to overwriting the original file.
        Returns:    [nothing]
        """
        self._attempted_close = True
        if self._write:
            # Copy the data to another file, excluding the removed variables ...
            tmp_file_name = "%s.tmp" % self._file_name
            tmp_file = None
            if reader.__name__ == "Nio":
                tmp_file = reader.open_file(tmp_file_name, format='nc', mode='w')
            elif reader.__name__ == "scipy.io.netcdf":
                tmp_file = reader.netcdf_file(tmp_file_name, mode='w')
            elif reader.__name__ == "Dataset":
                tmp_file = reader(tmp_file_name,'w')

            # Copy dimensions over
            for dimension, length in self._df.dimensions.iteritems():
                if reader.__name__ == "Nio":
                    tmp_file.create_dimension(dimension, length)
                elif reader.__name__ in ["scipy.io.netcdf", "Dataset"]:
                    tmp_file.createDimension(dimension, len(length))

            # Create new dimensions added by splicing
            for variable, splice_info in self._sp_variables.iteritems():
                dim_names, index_lists = splice_info
                for dim_name, indexes in zip(dim_names, index_lists):
                    if dim_name not in tmp_file.dimensions.keys():
                        if reader.__name__ == "Nio":
                            tmp_file.create_dimension(dim_name, len(np.unique(indexes)))
                        elif reader.__name__ in ["scipy.io.netcdf", "Dataset"]:
                            tmp_file.createDimension(dim_name, len(np.unique(indexes)))

            # Copy variable definitions over
            for variable, data in self._df.variables.iteritems():
                if variable not in self._rm_variables:
                    # If this variable is going to be spliced, find its new dimensions
                    if variable not in self._sp_variables:
                        dim_names = data.dimensions
                    else:
                        dim_names, index_lists = self._sp_variables[variable]

                    if reader.__name__ == "Nio":
                        tmp_file.create_variable(variable, data.typecode(), dim_names)
                    elif reader.__name__ == "scipy.io.netcdf":
                        tmp_file.createVariable(variable, data.dtype, dim_names)
                    elif reader.__name__ == "Dataset":
                        if hasattr(data, "_FillValue"):
                            tmp_file.createVariable(variable, data.dtype, dim_names, fill_value=data._FillValue)
                        else:
                            tmp_file.createVariable(variable, data.dtype, dim_names)

            # Copy file attributes over
            for attribute in dir(self._df):
                if not hasattr(tmp_file, attribute) and attribute not in self._rm_attributes['__file__']:
                    setattr(tmp_file, attribute, getattr(self._df, attribute))

            # Copy variable attributes over
            for variable, data in tmp_file.variables.iteritems():
                try:
                    for attribute in dir(self._df.variables[variable]):
                        if not hasattr(data, attribute) and attribute not in self._rm_attributes[variable]:
                            setattr(data, attribute, getattr(self._df.variables[variable], attribute))
                except KeyError:
                    for attribute in dir(self._df.variables[variable]):
                        if not hasattr(data, attribute):
                            setattr(data, attribute, getattr(self._df.variables[variable], attribute))

            # Copy variable data over (NetCDF4 Python appears to require this be done after setting all the variable attributes)
            for variable, data in self._df.variables.iteritems():
                if variable not in self._rm_variables:
                    tmp_variable = np.copy(self._df.variables[variable][:])

                    # Overwrite the data if we need to ...
                    if variable in self._ov_variables:
                        tmp_variable = np.copy(self._ov_variables[variable])

                    # Do splicing ...
                    if variable in self._sp_variables:
                        dim_names, index_lists = self._sp_variables[variable]
                        tmp_variable = np.copy(self._df.variables[variable][np.ix_(*index_lists)])

                    # Put it in the file ...
                    tmp_file.variables[variable][:] = tmp_variable

#           for dimension, length in tmp_file.dimensions.iteritems():
#               print dimension, length

        self._df.close()

        if self._write:
            # Move temporary file to original file's location
            import shutil
            if len(out_file_name) > 0:
                shutil.move(tmp_file_name,out_file_name)
            else:
                shutil.move(tmp_file_name, self._file_name) 

        self._df = None
        self._file_name = ""
        self._read = False
        self._write = False
        self._rm_variables = []
        self._sp_variables = {}
        self._rm_attributes = { '__file__':[] }
        return
Code example #23
def __reader__():
    for item in reader():
        img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
        label = np.array(item[1]).astype('int64').reshape(1)
        yield img, label
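A toy driver for __reader__, assuming the wrapped reader yields (flat_pixels, label) pairs; the base reader below is made up for illustration:

import numpy as np

def reader():  # hypothetical base reader
    for _ in range(2):
        yield [0.0] * (3 * 224 * 224), 1

def __reader__():
    for item in reader():
        img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
        label = np.array(item[1]).astype('int64').reshape(1)
        yield img, label

for img, label in __reader__():
    print(img.shape, label.shape)  # (3, 224, 224) (1,)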
Code example #24
File: run_beste.py (project: bestebegicarslan/kes73)
from select_parameters import select_parameters
from generatespectra import generatorspectra
from tablespectra import tablespectral
from skyflux import sky
from loadfilter import *
from contourplot import contourplot
import glob

# ask the user the parameter file to use
filename = select_parameters()

# import the parameters from the desired parameter file
filtername, spectraltype, mV, startrange, endrange, arrsize, skyfile,\
    atmofile, telefile, pbfiltername, fudgefactor, expostime, diameter,\
        seeingvalue, quantumeff, startrange2, endrange2, vbessel, umag, gmag,\
            rmag, imag, zmag = reader(filename)

# generate sky values and spectra
skyvalues = sky(filename)
if spectraltype < 0.0:
    library = glob.glob('/home/software/mkid-obs-simulator/library_mkid/*')
    print(library)
    template_sp = input('Please select your template spectrum:')  # raw_input in the original Python 2 code

    spectra = tablespectral(spectraltype, mV, filename, template_sp)
else:
    spectra = generatorspectra(spectraltype, mV, filename)

# print the graph!
print(contourplot(spectra, skyvalues, filename))