import random
import string

import nltk
import progressbar


def generate_objects(base_handler,
                     extra_handler,
                     num_objects,
                     show_pbar=False):
    corpus = [word.lower()
              for word in nltk.corpus.brown.words()
              if word not in string.punctuation]
    word_counts = {}

    seeds = list(range(num_objects))
    secondary_seeds = list(range(num_objects,
                                 num_objects + num_objects // 1000))

    # Generate objects in random, but repeatable, order.
    random.seed(42)
    random.shuffle(seeds)
    random.shuffle(secondary_seeds)

    if show_pbar:
        widgets = ['Generating: ',
                   progressbar.Percentage(),
                   ' ',
                   progressbar.Bar(),
                   ' ',
                   progressbar.ETA()]
        pbar = progressbar.ProgressBar(
            widgets=widgets,
            maxval=(num_objects + num_objects // 1000)).start()

    for i, seed in enumerate(seeds):
        is_last = i == len(seeds) - 1
        base_handler.handle_object(
            generate_object(seed, num_objects, corpus, word_counts),
            i,
            is_last)
        if show_pbar:
            pbar.update(i+1)

    rec_strs = find_words_by_frequency(word_counts, num_objects // 1000)

    for i, seed in enumerate(secondary_seeds):
        is_last = i == len(secondary_seeds) - 1
        extra_handler.handle_object(
            generate_object(seed, num_objects, corpus, {}),
            i + num_objects,
            is_last)
        if show_pbar:
            pbar.update(num_objects + i + 1)

    if show_pbar:
        pbar.finish()

    return rec_strs
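
A minimal usage sketch, assuming the surrounding module defines generate_object and find_words_by_frequency; PrintHandler is a hypothetical stand-in for the handler interface handle_object(obj, index, is_last) used above:

class PrintHandler:
    # Hypothetical handler: print each generated object as it arrives.
    def handle_object(self, obj, index, is_last):
        print(index, obj, '(last)' if is_last else '')

frequent_words = generate_objects(PrintHandler(), PrintHandler(),
                                  num_objects=10000, show_pbar=True)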
Example #2
    def updateFooter(self):
        if not self.cuu:
            return
        activetasks = self.helper.running_tasks
        failedtasks = self.helper.failed_tasks
        runningpids = self.helper.running_pids
        currenttime = time.time()
        if not self.lasttime or (currenttime - self.lasttime > 5):
            self.helper.needUpdate = True
            self.lasttime = currenttime
        if self.footer_present and not self.helper.needUpdate:
            return
        self.helper.needUpdate = False
        if self.footer_present:
            self.clearFooter()
        if ((not self.helper.tasknumber_total
             or self.helper.tasknumber_current == self.helper.tasknumber_total)
                and not len(activetasks)):
            return
        tasks = []
        for t in runningpids:
            progress = activetasks[t].get("progress", None)
            if progress is not None:
                pbar = activetasks[t].get("progressbar", None)
                rate = activetasks[t].get("rate", None)
                start_time = activetasks[t].get("starttime", None)
                if not pbar or pbar.bouncing != (progress < 0):
                    if progress < 0:
                        pbar = BBProgress(
                            "0: %s (pid %s) " %
                            (activetasks[t]["title"], activetasks[t]["pid"]),
                            100,
                            widgets=[progressbar.BouncingSlider(), ''],
                            extrapos=2,
                            resize_handler=self.sigwinch_handle)
                        pbar.bouncing = True
                    else:
                        pbar = BBProgress(
                            "0: %s (pid %s) " %
                            (activetasks[t]["title"], activetasks[t]["pid"]),
                            100,
                            widgets=[
                                progressbar.Percentage(), ' ',
                                progressbar.Bar(), ''
                            ],
                            extrapos=4,
                            resize_handler=self.sigwinch_handle)
                        pbar.bouncing = False
                    activetasks[t]["progressbar"] = pbar
                tasks.append((pbar, progress, rate, start_time))
            else:
                start_time = activetasks[t].get("starttime", None)
                if start_time:
                    tasks.append("%s - %s (pid %s)" %
                                 (activetasks[t]["title"],
                                  self.elapsed(currenttime - start_time),
                                  activetasks[t]["pid"]))
                else:
                    tasks.append(
                        "%s (pid %s)" %
                        (activetasks[t]["title"], activetasks[t]["pid"]))

        if self.main.shutdown:
            content = "Waiting for %s running tasks to finish:" % len(
                activetasks)
            print(content)
        else:
            if self.quiet:
                content = "Running tasks (%s of %s)" % (
                    self.helper.tasknumber_current,
                    self.helper.tasknumber_total)
            elif not len(activetasks):
                content = "No currently running tasks (%s of %s)" % (
                    self.helper.tasknumber_current,
                    self.helper.tasknumber_total)
            else:
                content = "Currently %2s running tasks (%s of %s)" % (
                    len(activetasks), self.helper.tasknumber_current,
                    self.helper.tasknumber_total)
            maxtask = self.helper.tasknumber_total
            if not self.main_progress or self.main_progress.maxval != maxtask:
                widgets = [
                    ' ', progressbar.Percentage(), ' ',
                    progressbar.Bar()
                ]
                self.main_progress = BBProgress(
                    "Running tasks",
                    maxtask,
                    widgets=widgets,
                    resize_handler=self.sigwinch_handle)
                self.main_progress.start(False)
            self.main_progress.setmessage(content)
            progress = self.helper.tasknumber_current - 1
            if progress < 0:
                progress = 0
            content = self.main_progress.update(progress)
            print('')
        lines = 1 + int(len(content) / (self.columns + 1))
        if self.quiet == 0:
            for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
                if isinstance(task, tuple):
                    pbar, progress, rate, start_time = task
                    if not pbar.start_time:
                        pbar.start(False)
                        if start_time:
                            pbar.start_time = start_time
                    pbar.setmessage('%s:%s' %
                                    (tasknum, pbar.msg.split(':', 1)[1]))
                    pbar.setextra(rate)
                    if progress > -1:
                        content = pbar.update(progress)
                    else:
                        content = pbar.update(1)
                    print('')
                else:
                    content = "%s: %s" % (tasknum, task)
                    print(content)
                lines = lines + 1 + int(len(content) / (self.columns + 1))
        self.footer_present = lines
        self.lastpids = runningpids[:]
        self.lastcount = self.helper.tasknumber_current
Example #3
                        r_pos, r_move.usi()))
                    valid = True

    train = selection[:args.n // 2]
    test = selection[args.n // 2:]

    print("Selected {} moves for training, {} for testing".format(
        len(train), len(test)))

    import shogi

    print("--------------- Caching Legal Moves ----------------")
    widgets = [
        ' [',
        progressbar.Timer(), '] ',
        progressbar.Bar(), ' (',
        progressbar.AdaptiveETA(), ') ',
        progressbar.Percentage()
    ]
    bar = progressbar.ProgressBar(maxval=len(selection), widgets=widgets)
    completed_games = 0
    total_positions = 0

    # Cache to store all of the legal moves for all the N sample positions
    legal_moves_cache = dict()
    for game in selection:
        pos, move = game[0], game[1]
        board = shogi.Board(pos)

        actions = []
        for move in board.legal_moves:
Example #4
    def processingLayer3(self):
        """
        Process the RGB image with a Fourier transform to approximate the
        segmentation.
        """
        print(" ")
        print("Preprocessing - Layer 3 processing")
        f = np.fft.fft2(self.image)
        fshift = np.fft.fftshift(f)
        magnitude_spectrum = (20 * np.log(np.abs(fshift))).astype('uint8')

        ## Mean procedure
        mean_fourier = np.array([
            magnitude_spectrum[:, :, 0].mean(),
            magnitude_spectrum[:, :, 1].mean(),
            magnitude_spectrum[:, :, 2].mean()
        ])

        ## Range procedure
        hist_01 = cv2.calcHist([magnitude_spectrum], [0, 1], None, [256, 256],
                               [0, 256, 0, 256])
        hist_02 = cv2.calcHist([magnitude_spectrum], [0, 2], None, [256, 256],
                               [0, 256, 0, 256])
        val_01 = np.percentile(hist_01, self.percentFou)
        val_02 = np.percentile(hist_02, self.percentFou)
        mask_01 = hist_01 > val_01
        mask_02 = hist_02 > val_02
        cnts_01, _ = cv2.findContours((mask_01.copy()).astype('uint8'),
                                      cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts_02, _ = cv2.findContours((mask_02.copy()).astype('uint8'),
                                      cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts_01:
            (x1, y1, w1, h1) = cv2.boundingRect(np.array(c))
        for h in cnts_02:
            (x2, y2, w2, h2) = cv2.boundingRect(np.array(h))

        ### Range fourier
        rangeFourier = np.array([[x2, x2 + w2], [x1, x1 + w1], [y1, y1 + h1]])

        ##### Processing of the image using the mean approach

        fourier = np.copy(magnitude_spectrum)
        image_transform = np.copy(self.image)
        size_x, size_y, size_c = fourier.shape
        binary_fourier = np.zeros([size_x, size_y], dtype='uint8')
        val = np.zeros(size_c)
        ### Using the color mean
        percent = 0.83
        inicio = time.time()
        area = size_x * size_y
        widgets = [
            progressbar.Percentage(), ' ',
            progressbar.Bar(), ' ',
            progressbar.ETA(), ' ',
            progressbar.AdaptiveETA()
        ]
        bar = progressbar.ProgressBar(widgets=widgets, maxval=area)
        bar.start()
        for x in range(0, size_x):
            for y in range(0, size_y):
                for c in range(0, size_c):
                    val[c] = np.abs(1 - (
                        (image_transform[x, y, c]) - mean_fourier[c]) / 255)
                    if (val[c] < percent):
                        val[0] = 0
                        break
                    elif (val[c] >= percent):
                        val[c] = 1  # range condition met: keep the pixel
                if (val[0] == 0):
                    binary_fourier[x, y] = 0
                elif val.all():
                    binary_fourier[x, y] = 1
                val = np.zeros(size_c)
            # Progress by completed rows: (x + 1) rows of size_y pixels each.
            bar.update((x + 1) * size_y)
        final = time.time() - inicio
        bar.update(area)
        print("Tiempo de procesamiento : ", round(final, 2), "Segundos")

        ### Morphological Process
        # First apply a dilation
        radio = 2
        sel = disk(radio)
        binary_dilat1 = dilation(binary_fourier, sel)
        for i in range(0, 2):
            binary_dilat1 = dilation(binary_dilat1, sel)
        # Then erode to remove small objects
        radio = 5
        sel = disk(radio)
        binary_erosion1 = erosion(binary_dilat1.copy(), sel)
        for i in range(0, 2):
            binary_erosion1 = erosion(binary_erosion1, sel)
        # Then dilate again
        radio = 3
        sel = disk(radio)
        binary = dilation(binary_erosion1.copy(), sel)

        return binary, binary_fourier, rangeFourier, mean_fourier
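
The per-pixel triple loop above runs in pure Python and is slow for large images. Below is a hedged, vectorized NumPy sketch of the same thresholding rule (keep a pixel only when every channel is within percent of the Fourier mean); it assumes self.image and mean_fourier as defined in the method, and uses float math in place of the original uint8 arithmetic:

# Vectorized equivalent of the loop above (sketch, not the original method).
vals = np.abs(1 - (self.image.astype(np.float64) - mean_fourier) / 255)
binary_fourier = (vals >= percent).all(axis=2).astype('uint8')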
Example #5
def plot_raydensity(map_object, station_events, domain):
    """
    Create a ray-density plot for all events and all stations.

    This function is potentially expensive and will use all CPUs available.
    Does require geographiclib to be installed.
    """
    import ctypes as C
    from lasif import rotations
    from lasif.domain import RectangularSphericalSection
    from lasif.tools.great_circle_binner import GreatCircleBinner
    from lasif.utils import Point
    import multiprocessing
    import progressbar
    from scipy.stats import scoreatpercentile

    if not isinstance(domain, RectangularSphericalSection):
        raise NotImplementedError(
            "Raydensity currently only implemented for rectangular domains. "
            "Should be easy to implement for other domains. Let me know.")

    # Merge everything so that a list with coordinate pairs is created. This
    # list is then distributed among all processors.
    station_event_list = []
    for event, stations in station_events:
        if domain.rotation_angle_in_degree:
            # Rotate point to the non-rotated domain.
            e_point = Point(*rotations.rotate_lat_lon(
                event["latitude"], event["longitude"], domain.rotation_axis,
                -1.0 * domain.rotation_angle_in_degree))
        else:
            e_point = Point(event["latitude"], event["longitude"])
        for station in stations.values():
            # Rotate point to the non-rotated domain if necessary.
            if domain.rotation_angle_in_degree:
                p = Point(*rotations.rotate_lat_lon(
                    station["latitude"], station["longitude"],
                    domain.rotation_axis, -1.0 *
                    domain.rotation_angle_in_degree))
            else:
                p = Point(station["latitude"], station["longitude"])
            station_event_list.append((e_point, p))

    circle_count = len(station_event_list)

    # The granularity of the latitude/longitude discretization for the
    # raypaths. Attempt to get a somewhat meaningful result in any case.
    if circle_count < 1000:
        lat_lng_count = 1000
    elif circle_count < 10000:
        lat_lng_count = 2000
    else:
        lat_lng_count = 3000

    cpu_count = multiprocessing.cpu_count()

    def to_numpy(raw_array, dtype, shape):
        data = np.frombuffer(raw_array.get_obj())
        data.dtype = dtype
        return data.reshape(shape)
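    # For instance (hypothetical values): a shared buffer of 6 uint32 entries
    # can be viewed as a 2x3 matrix without copying:
    #   buf = multiprocessing.Array(C.c_uint32, 6)
    #   view = to_numpy(buf, np.uint32, (2, 3))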

    print "\nLaunching %i greatcircle calculations on %i CPUs..." % \
        (circle_count, cpu_count)

    widgets = [
        "Progress: ",
        progressbar.Percentage(),
        progressbar.Bar(), "",
        progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(widgets=widgets,
                                   maxval=circle_count).start()

    def great_circle_binning(sta_evs, bin_data_buffer, bin_data_shape, lock,
                             counter):
        new_bins = GreatCircleBinner(domain.min_latitude, domain.max_latitude,
                                     lat_lng_count, domain.min_longitude,
                                     domain.max_longitude, lat_lng_count)
        for event, station in sta_evs:
            with lock:
                counter.value += 1
            if not counter.value % 25:
                pbar.update(counter.value)
            new_bins.add_greatcircle(event, station)

        bin_data = to_numpy(bin_data_buffer, np.uint32, bin_data_shape)
        with bin_data_buffer.get_lock():
            bin_data += new_bins.bins

    # Split the data in cpu_count parts.
    def chunk(seq, num):
        avg = len(seq) / float(num)
        out = []
        last = 0.0
        while last < len(seq):
            out.append(seq[int(last):int(last + avg)])
            last += avg
        return out
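    # For example (hypothetical input): chunk(list(range(10)), 3) returns
    # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]].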

    chunks = chunk(station_event_list, cpu_count)

    # One instance that collects everything.
    collected_bins = GreatCircleBinner(domain.min_latitude,
                                       domain.max_latitude, lat_lng_count,
                                       domain.min_longitude,
                                       domain.max_longitude, lat_lng_count)

    # Use a multiprocessing shared memory array and map it to a numpy view.
    collected_bins_data = multiprocessing.Array(C.c_uint32,
                                                collected_bins.bins.size)
    collected_bins.bins = to_numpy(collected_bins_data, np.uint32,
                                   collected_bins.bins.shape)

    # Create, launch and join one process per CPU. Use a shared value as a
    # counter and a lock to avoid race conditions.
    processes = []
    lock = multiprocessing.Lock()
    counter = multiprocessing.Value("i", 0)
    for _i in range(cpu_count):
        processes.append(
            multiprocessing.Process(target=great_circle_binning,
                                    args=(chunks[_i], collected_bins_data,
                                          collected_bins.bins.shape, lock,
                                          counter)))
    for process in processes:
        process.start()
    for process in processes:
        process.join()

    pbar.finish()

    stations = chain.from_iterable(
        (_i[1].values() for _i in station_events if _i[1]))
    # Remove duplicates
    stations = [(_i["latitude"], _i["longitude"]) for _i in stations]
    stations = set(stations)
    title = "%i Events, %i unique raypaths, "\
            "%i unique stations" % (len(station_events), circle_count,
                                    len(stations))
    plt.title(title, size="xx-large")

    data = collected_bins.bins.transpose()

    if data.max() >= 10:
        data = np.log10(np.clip(data, a_min=0.5, a_max=data.max()))
        data[data >= 0.0] += 0.1
        data[data < 0.0] = 0.0
        max_val = scoreatpercentile(data.ravel(), 99)
    else:
        max_val = data.max()

    cmap = cm.get_cmap("gist_heat")
    cmap._init()
    cmap._lut[:120, -1] = np.linspace(0, 1.0, 120)**2

    # Slightly change the appearance of the map so it suits the rays.
    map_object.fillcontinents(color='#dddddd', lake_color='#dddddd', zorder=0)

    lngs, lats = collected_bins.coordinates
    # Rotate back if necessary!
    if domain.rotation_angle_in_degree:
        for lat, lng in zip(lats, lngs):
            lat[:], lng[:] = rotations.rotate_lat_lon(
                lat, lng, domain.rotation_axis,
                domain.rotation_angle_in_degree)
    ln, la = map_object(lngs, lats)
    map_object.pcolormesh(ln, la, data, cmap=cmap, vmin=0, vmax=max_val)
    # Draw the coastlines so they appear over the rays. Otherwise things are
    # sometimes hard to see.
    map_object.drawcoastlines()
    map_object.drawcountries(linewidth=0.2)
Example #6
#### Get list of user strings ####
UserMongoList = []
for user in MongoClient().config_static.profile_user.find():
    UserMongoList.append(user['name'])
helperObj.UserMongoList = UserMongoList

#### Determining lines to process ####
options.endindex = (InputMongoDB.count() if int(options.endindex) == 0
                    else int(options.endindex))
diffLines = int(options.endindex) - int(options.startIndex) + 1

#### Preparing progress bar ####
progressBarObj = progressbar.ProgressBar(
    maxval=diffLines,
    widgets=[progressbar.Bar('=', '[', ']'), ' ',
             progressbar.Percentage()])
progressBarObj.start()


def processLine(start, index):
    """ Assign workers with workload """

    global converted

    for inputLine in InputMongoDB.find()[start:start +
                                         int(options.linesPerThread)]:

        if converted >= diffLines:
            print('break on: ' + str(converted))
            break
Example #7
    def __run_tests(self, tests):
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s", __name__)

        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open(self.current_benchmark,
                          'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                with self.quiet_out.enable():
                    if self.__run_test(test) != 0:
                        error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")

            # Setup a nice progressbar and ETA indicator
            widgets = [
                self.mode, ': ',
                progressbar.Percentage(), ' ',
                progressbar.Bar(), ' Rough ',
                progressbar.ETA()
            ]
            pbar = progressbar.ProgressBar(widgets=widgets,
                                           maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print(header("Running Test: %s" % test.name))
                    with open(self.current_benchmark,
                              'w') as benchmark_resume_file:
                        benchmark_resume_file.write(test.name)
                    with self.quiet_out.enable():
                        test_process = Process(target=self.__run_test,
                                               name="Test Runner (%s)" %
                                               test.name,
                                               args=(test, ))
                        test_process.start()
                        test_process.join(self.run_test_timeout_seconds)
                    # Load intermediate result from the child process.
                    self.__load_results()
                    if (test_process.is_alive()):
                        logging.debug(
                            "Child process for {name} is still alive. Terminating."
                            .format(name=test.name))
                        self.__write_intermediate_results(
                            test.name, "__run_test timeout (=" +
                            str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()
                        test_process.join()
                    if test_process.exitcode != 0:
                        error_happened = True
            pbar.finish()

        if os.path.isfile(self.current_benchmark):
            os.remove(self.current_benchmark)
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
Example #8
import h5py
import numpy as np
import progressbar


def ReduceData(filename):

  # Read attributes and data
  f = h5py.File(filename, 'r')
  level = f.attrs['MaxLevel']
  block_size = f.attrs['MeshBlockSize']
  root_grid_size = f.attrs['RootGridSize']
  nx1 = root_grid_size[0] * 2**level
  nx2 = root_grid_size[1] * 2**level if root_grid_size[1] > 1 else 1
  nx3 = root_grid_size[2] * 2**level if root_grid_size[2] > 1 else 1

  if nx3 > 1:
    dim = 3
  elif nx2 > 1:
    dim = 2
  else:
    dim = 1


  quantities = f.attrs['VariableNames'][:]
  quantities = [str(q) for q in quantities
                if q != 'x1f' and q != 'x2f' and q != 'x3f']

  data={}

# the coordinates
  for d in range(1,4):
    nx = (nx1,nx2,nx3)[d-1]
    xmin = f.attrs['RootGridX'+repr(d)][0]
    xmax = f.attrs['RootGridX'+repr(d)][1]
    xrat_root = f.attrs['RootGridX'+repr(d)][2]
    if (xrat_root == 1.0):
      data['x'+repr(d)+'f'] = np.linspace(xmin, xmax, nx+1)
    else:
      xrat = xrat_root ** (1.0 / 2**level)
      data['x'+repr(d)+'f'] = \
         xmin + (1.0-xrat**np.arange(nx+1)) / (1.0-xrat**nx) * (xmax-xmin)

# Get metadata describing file layout
  num_blocks = f.attrs['NumMeshBlocks']
  dataset_names = f.attrs['DatasetNames'][:]
  dataset_sizes = f.attrs['NumVariables'][:]
  dataset_sizes_cumulative = np.cumsum(dataset_sizes)
  variable_names = f.attrs['VariableNames'][:]
  levels = f['Levels'][:]
  logical_locations = f['LogicalLocations'][:]
  quantity_datasets = []
  quantity_indices = []
  spec_datasets = []
  spec_indices = []
  for q in quantities:
    var_num = np.where(variable_names == q)[0][0]
    dataset_num = np.where(dataset_sizes_cumulative > var_num)[0][0]
    if dataset_num == 0:
      dataset_index = var_num
    else:
      dataset_index = var_num - dataset_sizes_cumulative[dataset_num-1]
    quantity_datasets.append(dataset_names[dataset_num])
    quantity_indices.append(dataset_index)
    if q in ('rho', 'vel1', 'vel2', 'vel3', 'pgas', 'B1', 'B2', 'B3', 'Er',
             'Sigma_s', 'Sigma_a', 'Fr01', 'Fr02'):
      spec_datasets.append(dataset_names[dataset_num])
      spec_indices.append(dataset_index)

# get rho, v1, v2, v3, B1, B2, B3, kappa_s, kappa_a


  # now add the derived quantities
  derived_quantities = [
      'Maxwell', 'Reynolds', 'BrBphi', 'BthetaBphi', 'rhovrvphi',
      'rhovthetavphi', 'vrEr', 'vthetaEr', 'rhoPB', 'rhosq', 'PB1', 'PB2',
      'PB3', 'PB', 'PBsq', 'kappa_s', 'kappa_a', 'Radacc', 'RhoVr',
      'RhoVtheta', 'RhoVphi', 'RhoVout', 'RhoVin', 'Ekin1', 'Ekin2', 'Ekin3',
      'tgas', 'Fr01Sigma', 'Fr02Sigma', 'Fr01kappa', 'Fr02kappa'
  ]
  for q in derived_quantities:
    quantities.append(q)
    quantity_datasets.append('None')
    quantity_indices.append(0)

# the quantities
# get the azimuthal averaged data

  for q in quantities:
    data[q] = np.zeros((nx3,nx2,nx1))



  bar = progressbar.ProgressBar(maxval=num_blocks, \
           widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]).start()


# Go through blocks in data file
  for block_num in range(num_blocks):
    # Extract location information
    block_level = levels[block_num]
    block_location = logical_locations[block_num,:]
   # Calculate scale (number of copies per dimension)
    s= 2**(level-block_level)
  
  # the block size
    radius=f['x1f'][block_num]
    theta=f['x2f'][block_num]
    phi=f['x3f'][block_num]
  
  # get cell center coordinates
    x1v=np.zeros(block_size[0])
    x2v=np.zeros(block_size[1])
    x3v=np.zeros(block_size[2])
  
    for ni in range(block_size[0]):
      x1v[ni] = 0.75 * (radius[ni+1]**4.0 - radius[ni]**4.0) \
                / (radius[ni+1]**3.0 - radius[ni]**3.0)

    for nj in range(block_size[1]):
      x2v[nj] = (((np.sin(theta[nj+1]) - theta[nj+1] * np.cos(theta[nj+1]))
                  - (np.sin(theta[nj]) - theta[nj] * np.cos(theta[nj])))
                 / (np.cos(theta[nj]) - np.cos(theta[nj+1])))

    for nk in range(block_size[2]):
      x3v[nk] = 0.5 * (phi[nk+1] + phi[nk])
  
    grid_phi,grid_theta,grid_r=np.meshgrid(x3v,x2v,x1v,indexing='ij')
  

# Calculate fine-level begin indices
    il = block_location[0] * block_size[0] * s
    jl = block_location[1] * block_size[1] * s if dim >= 2 else 0
    kl = block_location[2] * block_size[2] * s if dim >= 3 else 0

# Calculate fine-level end indices
    iu = il + block_size[0] * s
    ju = jl + block_size[1] * s if dim >= 2 else 1
    ku = kl + block_size[2] * s if dim >= 3 else 1

# Calculate fine-level offsets
    io_vals = range(s)
    jo_vals = range(s) if dim >= 2 else (0,)
    ko_vals = range(s) if dim >= 3 else (0,)

          
    rho_data=f[spec_datasets[0]][spec_indices[0],block_num,:]
    v1_data=f[spec_datasets[1]][spec_indices[1],block_num,:]
    vout_data=v1_data.clip(min=0.0)
    vin_data=v1_data.clip(max=0.0)
    v2_data=f[spec_datasets[2]][spec_indices[2],block_num,:]
    v3_data=f[spec_datasets[3]][spec_indices[3],block_num,:]
    pgas_data=f[spec_datasets[4]][spec_indices[4],block_num,:]
    B1_data=f[spec_datasets[5]][spec_indices[5],block_num,:]
    B2_data=f[spec_datasets[6]][spec_indices[6],block_num,:]
    B3_data=f[spec_datasets[7]][spec_indices[7],block_num,:]
    Er_data=f[spec_datasets[8]][spec_indices[8],block_num,:]
    Fr01_data=f[spec_datasets[9]][spec_indices[9],block_num,:]
    Fr02_data=f[spec_datasets[10]][spec_indices[10],block_num,:]
    sigma_s_data=f[spec_datasets[11]][spec_indices[11],block_num,:]
    sigma_a_data=f[spec_datasets[12]][spec_indices[12],block_num,:]
    PB_data=0.5*(np.multiply(B1_data,B1_data)+np.multiply(B2_data,B2_data)+np.multiply(B3_data,B3_data))
    PB1_data=0.5*np.multiply(B1_data,B1_data)   
    PB2_data=0.5*np.multiply(B2_data,B2_data)
    PB3_data=0.5*np.multiply(B3_data,B3_data)
    rhovphi_data=np.multiply(rho_data,v3_data)



# Assign values
    for q,dataset,index in zip(quantities,quantity_datasets,quantity_indices):
      if q=='rhosq':
         oridata=np.multiply(rho_data,rho_data)
      elif q=='PB':
         oridata=PB_data
      elif q=='PB1':
         oridata=PB1_data
      elif q=='PB2':
         oridata=PB2_data
      elif q=='PB3':
         oridata=PB3_data
      elif q=='PBsq':
         oridata=np.multiply(PB_data,PB_data)
      elif q=='kappa_s':
         oridata=np.divide(sigma_s_data,rho_data)
      elif q=='kappa_a':
         oridata=np.divide(sigma_a_data,rho_data)
      elif q=='Maxwell':
         bxdata=np.multiply(B2_data,np.cos(grid_theta))+np.multiply(B1_data,np.sin(grid_theta))
         oridata=-np.multiply(bxdata,B3_data)
      elif q=='Reynolds':
         vxdata=np.multiply(v2_data,np.cos(grid_theta))+np.multiply(v1_data,np.sin(grid_theta))
         oridata=np.multiply(vxdata,np.multiply(v3_data,rho_data))
      elif q=='Radacc':
         oridata=np.divide(np.multiply(Fr01_data,(sigma_s_data+sigma_a_data)),rho_data)
      elif q=='RhoVr':
         oridata=np.multiply(v1_data,rho_data)
      elif q=='RhoVtheta':
         oridata=np.multiply(v2_data,rho_data)
      elif q=='RhoVphi':
         oridata=np.multiply(v3_data,rho_data)
      elif q=='RhoVout':
         oridata=np.multiply(vout_data,rho_data)
      elif q=='RhoVin':
         oridata=np.multiply(vin_data,rho_data)
      elif q=='Ekin1':
         oridata=0.5*np.multiply(v1_data, v1_data)
         oridata=np.multiply(oridata,rho_data)
      elif q=='Ekin2':
         oridata=0.5*np.multiply(v2_data, v2_data)
         oridata=np.multiply(oridata,rho_data)
      elif q=='Ekin3':
         oridata=0.5*np.multiply(v3_data, v3_data)
         oridata=np.multiply(oridata,rho_data)
      elif q=='BrBphi':
         oridata=np.multiply(B1_data,B3_data)
      elif q=='BthetaBphi':
         oridata=np.multiply(B2_data,B3_data)
      elif q=='rhovrvphi':
         oridata=np.multiply(v1_data,rhovphi_data)
      elif q=='rhovthetavphi':
         oridata=np.multiply(v2_data,rhovphi_data)
      elif q=='vrEr':
         oridata=np.multiply(v1_data,Er_data)
      elif q=='vthetaEr':
         oridata=np.multiply(v2_data,Er_data)
      elif q=='rhoPB':
         oridata=np.multiply(rho_data,PB_data)
      elif q=='tgas':
         oridata=np.divide(pgas_data,rho_data)
      elif q=='Fr01Sigma':
         oridata=np.multiply(Fr01_data,(sigma_s_data+sigma_a_data))
      elif q=='Fr02Sigma':
         oridata=np.multiply(Fr02_data,(sigma_s_data+sigma_a_data))
      elif q=='Fr01kappa':
         oridata=np.divide(np.multiply(Fr01_data,(sigma_s_data+sigma_a_data)),rho_data)
      elif q=='Fr02kappa':
         oridata=np.divide(np.multiply(Fr02_data,(sigma_s_data+sigma_a_data)),rho_data)
      else:
         oridata=f[dataset][index,block_num,:]
          
      for ko in ko_vals:
        for jo in jo_vals:
          for io in io_vals:
            data[q][kl+ko:ku+ko:s,jl+jo:ju+jo:s,il+io:iu+io:s] = oridata

    bar.update(block_num+1)


  bar.finish()

  # get the cell-center coordinates
  data['x1v']=np.zeros(nx1)
  data['x2v']=np.zeros(nx2)
  data['x3v']=np.zeros(nx3)
  
  for ni in range(nx1):
    data['x1v'][ni] = 0.75 * (data['x1f'][ni+1]**4.0 - data['x1f'][ni]**4.0) \
                      / (data['x1f'][ni+1]**3.0 - data['x1f'][ni]**3.0)

  for nj in range(nx2):
    data['x2v'][nj] = (((np.sin(data['x2f'][nj+1])
                         - data['x2f'][nj+1] * np.cos(data['x2f'][nj+1]))
                        - (np.sin(data['x2f'][nj])
                           - data['x2f'][nj] * np.cos(data['x2f'][nj])))
                       / (np.cos(data['x2f'][nj]) - np.cos(data['x2f'][nj+1])))

  for nk in range(nx3):
    data['x3v'][nk] = 0.5 * (data['x3f'][nk+1] + data['x3f'][nk])



  #add time
  data['Time']=f.attrs['Time']

  f.close()

  return data
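
A hedged usage sketch; the dump filename is hypothetical and only illustrates the expected Athena++ HDF5 input:

data = ReduceData('disk.out1.00010.athdf')  # hypothetical dump file
print(data['Time'], data['rho'].shape)      # time stamp and (nx3, nx2, nx1)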
Example #9
    def loadSequence(self, seqName, cfg=None, Nmax=float('inf'),
                     shuffle=False, rng=None, docom=False, allJoints=False):

        config = {'cube':(800,800,1200)}
        config['cube'] = [s*self.scales[seqName] for s in config['cube']]

        if np.isinf(Nmax):
            pickleCache = '{}/{}_{}_{}_cache.pkl'.format(self.cacheDir, self.__class__.__name__, seqName,allJoints)
        else:
            pickleCache = '{}/{}_{}_{}_cache_{}.pkl'.format(self.cacheDir, self.__class__.__name__, seqName, allJoints,Nmax)
        print(pickleCache)

        if self.useCache:
            if os.path.isfile(pickleCache):
                print("Loading cache data from {}".format(pickleCache))
                f = open(pickleCache,'rb')
                (seqName,data,config) = cPickle.load(f)
                f.close()

                #shuffle data
                if shuffle and rng is not None:
                    print("shuffling")
                    rng.shuffle(data)
                if not np.isinf(Nmax):
                    return NamedImgSequence(seqName,data[0:Nmax],config)
                else:
                    return NamedImgSequence(seqName,data,config)

        #load the dataset
        objdir = '{}/{}/'.format(cfg.base_dir,cfg.data_dirs[seqName])
        names = glob.glob(os.path.join(objdir,'*.txt'))

        joints3D = np.empty([len(names),cfg.num_joints, cfg.num_dims])
        joints2D = np.empty([len(names),cfg.num_joints, cfg.num_dims])

        # load the groundtruth data here
        cnt = 0
        for name in tqdm.tqdm(names,total=len(names)):
            all_jnts = np.loadtxt(name)
            # get the proper subset
            joints3D[cnt] = all_jnts[self.restrictedJoints]
            # get 2D projections
            joints2D[cnt] = self.joints3DToImg(joints3D[cnt])
            cnt+=1

        if allJoints:
            eval_idxs = np.arange(cfg.num_joints)
        else:
            eval_idxs = self.restrictedJoints
        self.numJoints = len(eval_idxs)

        txt= 'Loading {}'.format(seqName)
        pbar = pb.ProgressBar(maxval=joints3D.shape[0],widgets=[txt,pb.Percentage(),pb.Bar()])
        pbar.start()

        data = []
        i=0
        for line in range(joints3D.shape[0]):
            imgid = names[line].split('/')[-1].split('.')[0].split('_')[-1]
            dptFileName = os.path.join(cfg.base_dir,
                                       cfg.data_dirs[seqName],
                                       'depth_%s.png'%imgid)

            if not os.path.isfile(dptFileName):
                print("File {} does not exist!").format(dptFileName)
                i += 1
                continue
            dpt = self.loadDepthMap(dptFileName)

            gtorig = joints2D[line,eval_idxs,:]
            gt3Dorig = joints3D[line,eval_idxs,:]

            data.append(LabelledFrame(dpt.astype(np.float32),gtorig,gt3Dorig,dptFileName,''))
            pbar.update(i)
            i+=1

            #early stop
            if len(data)>=Nmax:
                break

        pbar.finish()
        print("loaded {} samples.".format(len(data)))

        if self.useCache:
            print("Save cache data to {}".format(pickleCache))
            f = open(pickleCache,'wb')
            cPickle.dump((seqName,data,config), f, protocol=cPickle.HIGHEST_PROTOCOL)
            f.close()

        #shuffle data
        if shuffle and rng is not None:
            print("shuffling")
            rng.shuffle(data)
        
        return NamedImgSequence(seqName,data,config)
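
A hedged call sketch, assuming a hypothetical loader instance of this class and a cfg namespace carrying the attributes referenced above (base_dir, data_dirs, num_joints, num_dims):

from types import SimpleNamespace

cfg = SimpleNamespace(base_dir='/data/hands',           # hypothetical paths
                      data_dirs={'train': 'train'},
                      num_joints=21, num_dims=3)
seq = loader.loadSequence('train', cfg=cfg, Nmax=1000)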
Example #10
def simulation(dat, var, q_in=None, q_out=None):
    logger.info("New simulation launched.")
    # Set up perturbation class -> modifies parameters dictionary
    d = dat
    # General options dictionary (all the dictionaries below are connected, same memory)
    o = dat.opts

    # Dictionary for parameters
    p = o['parameters']
    # Dictionary for controlling the simulation
    c = o['controls']
    r = o['raster']
    # Dictionaries for the classes
    i = o['perturbation']
    n = o['network']

    # Assign the variables from the dictionary to easily named local variables
    rwe, rwi = (var['rwe'], var['rwi'])
    re, ve, ser, ri, vi, sir = (var['re'], var['ve'], var['ser'],
                                var['ri'], var['vi'], var['sir'])
    swer, swir = (var['swer'], var['swir'])
    if o.get('sp', False):
        sye, syi = (var['sye'], var['syi'])

    se = np.ones(2) * 0.0
    si = np.ones(2) * 0.0
    # Select transfer function for WC equations
    if set(d.systems).intersection({'if-fr', 'if-nf', 'all'}):
        transf = sigmoid_lif
    elif set(d.systems).intersection({'qif-fr', 'qif-nf', 'all'}):
        transf = sigmoid_qif
    else:
        transf = sigmoid_qif

    # Initial parameter values may be changed from now
    pause = None
    while c['pause']:
        try:
            incoming = q_in.get_nowait()
            for key in incoming.keys():
                o[key].update(incoming[key])

        except Queue.Empty:
            if not pause:
                logger.info("Data successfully updated. Simulation in hold.")
                pause = True

    # Set up Connectivity class(es) -> modifies parameters dictionary
    per = Perturbation(o)
    exc = Connectivity(o, 'exc')
    inh = Connectivity(o, 'inh')
    objects = [per, exc, inh, dat.fr]
    # Progress-bar configuration
    widgets = ['Progress: ', pb.Percentage(), ' ',
               pb.Bar(marker='='), ' ', pb.ETA(), ' ']

    # Raster configuration variables
    if d.opts.get('sp', False):
        r_maxlength = 1000
        raster = LimitedSizeDict(size_limit=r_maxlength)
        s_pop = None

    # Measuring frequencies of individual neurons
    spikes_e = []
    spikes_i = []
    tfreq = 0

    if o.get('sp', False):
        if d.nf:
            nmax = d.dNe
        else:
            nmax = d.Ne
        if nmax <= 1000:
            pop_max = nmax
        else:
            pop_max = 1000

    # ############################################################
    # 0) Prepare simulation environment
    if args.loop != 0:  # "loop" makes the time loop run that many extra passes; 0 means run endlessly
        barsteps = int((d.tfinal - d.t0) / d.dt)
        pbar = pb.ProgressBar(widgets=widgets, maxval=args.loop * (barsteps + 1)).start()
    else:
        args.loop = sys.maxsize
        # noinspection PyTypeChecker
        pbar = pb.ProgressBar(max_value=pb.UnknownLength)

    time1 = timer()
    tstep = 0
    temps = 0.0
    tsk_e = 0
    tsk_i = 0

    noise_E = 0.0
    noise_I = 0.0

    np.seterr(all='raise')

    # Time loop: (if loop was 0 in the config step,
    #             we can break the time-loop by changing "loop"
    #             or explicitly with a break)
    while temps < d.tfinal * args.loop:
        pause = False
        while c['pause']:
            try:
                incoming = q_in.get_nowait()
                for key in incoming.keys():
                    o[key].update(incoming[key])
            except Queue.Empty:
                if not pause:
                    logger.info("Simulation in hold.")
                    pause = True
        # Time delay discretization
        delay = int(p['delay'] / d.dt)
        if len(se) != (2 + delay):
            se.resize(2 + delay)
            si.resize(2 + delay)
            print('Delay! %d' % delay)
        # Time step variables
        kp = tstep % d.nsteps
        k = (tstep + d.nsteps - 1) % d.nsteps
        kd = (tstep + d.nsteps - 1 - delay) % d.nsteps
        kd2 = (tstep + d.nsteps - delay) % d.nsteps
        # Time steps of the synaptic activation (just 2 positions, columns)
        k2p = tstep % (2 + delay)

        if set(d.systems).intersection({'fr', 'wc', 'all'}):  # Two population FR eqs simulations
            # Wilson Cowan
            if 'wc' in d.systems:
                input_e = p['etae'] + p['taue'] * n['jee'] * swer[k] - p['taue'] * n['jie'] * swir[k] + per.it[kp]
                rwe[kp] = rwe[k] + d.dt / p['taue'] * (-rwe[k] + transf(input_e, p['taue'], p['delta'],
                                                                        vr=p['vreset'], vth=p['vpeak'],
                                                                        vrevers=p['revers'], dt=p['rperiod']))

                input_i = (p['etai'] - p['taui'] * n['jii'] * swir[k]
                           + p['taui'] * n['jei'] * swer[k]
                           + i['sym'] * per.it[kp])
                rwi[kp] = rwi[k] + d.dt / p['taui'] * (-rwi[k] + transf(input_i, p['taui'], p['delta'],
                                                                        vr=p['vreset'], vth=p['vpeak'],
                                                                        vrevers=p['revers'], dt=p['rperiod']))
                if p.get('taude', False):
                    if p['taude'] > 0.001 and p['taudi'] > 0.001:
                        swer[kp] = swer[k] + d.dt / p['taude'] * (-swer[k] + rwe[kd])
                        swir[kp] = swir[k] + d.dt / p['taudi'] * (-swir[k] + rwi[kd])
                    else:
                        swer[kp] = rwe[kd2] * 1.0
                        swir[kp] = rwi[kd2] * 1.0
                else:
                    swer[kp] = rwe[kd2] * 1.0
                    swir[kp] = rwi[kd2] * 1.0

            if 'wc-eff' in d.systems:
                input_e = (p['etae'] + p['taue'] * n['jee'] * swer[k]
                           - p['taue'] * n['jei'] * swir[k] + per.it[kp])
                rwe[kp] = rwe[k] + d.dt / p['taue'] * (-rwe[k] + transf(input_e, p['taue'], p['delta'],
                                                                        vr=p['vreset'], vth=p['vpeak'],
                                                                        vrevers=p['revers'], dt=p['rperiod']))

                input_i = (p['etai'] + p['taui'] * n['jee'] * swir[k]
                           - p['taui'] * n['jei'] * swer[k]
                           + i['sym'] * per.it[kp])
                rwi[kp] = rwi[k] + d.dt / p['taui'] * (-rwi[k] + transf(input_i, p['taui'], p['delta'],
                                                                        vr=p['vreset'], vth=p['vpeak'],
                                                                        vrevers=p['revers'], dt=p['rperiod']))
                if p.get('taude', False):
                    if p['taude'] > 0.001 and p['taudi'] > 0.001:
                        swer[kp] = swer[k] + d.dt / p['taude'] * (-swer[k] + rwe[kd])
                        swir[kp] = swir[k] + d.dt / p['taudi'] * (-swir[k] + rwi[kd])
                    else:
                        swer[kp] = rwe[kd2] * 1.0
                        swir[kp] = rwi[kd2] * 1.0
                else:
                    swer[kp] = rwe[kd2] * 1.0
                    swir[kp] = rwi[kd2] * 1.0

            # QIF-FR
            if 'fr' in d.systems:
                if d.cond:
                    re[kp] = re[k] + d.dt / p['taue'] * (
                        p['delta'] / pi / p['taue'] + 2.0 * re[k] * ve[k] - p['taue'] * re[k] * (
                        1.0 / p['taue'] + p['ae'] * n['jee'] * ser[k] + p['ai'] * n['jie'] * sir[k]))
                    ve[kp] = ve[k] + d.dt / p['taue'] * (
                        ve[k] ** 2 + p['etae'] - pi2 * (re[k] * p['taue']) ** 2 - ve[k] - p['taue'] * (
                            p['ae'] * n['jee'] * ser[k] * (ve[k] - p['reverse']) + p['ai'] * n['jie'] * sir[k] * (
                                ve[k] - p['reversi'])) + per.it[kp])

                    ri[kp] = ri[k] + d.dt / p['taui'] * (
                        p['delta'] / pi / p['taui'] + 2.0 * ri[k] * vi[k] - p['taui'] * ri[k] * (
                            1.0 / p['taui'] + p['ae'] * n['jei'] * ser[k] + p['ai'] * n['jii'] * sir[k]))
                    vi[kp] = vi[k] + d.dt / p['taui'] * (
                        vi[k] ** 2 + p['etai'] - pi2 * (ri[k] * p['taui']) ** 2 - vi[k] - p['taui'] * (
                            p['ae'] * n['jei'] * ser[k] * (vi[k] - p['reverse']) + p['ai'] * n['jii'] * sir[k] * (
                                vi[k] - p['reversi'])) + i['sym'] * per.it[kp])
                else:
                    re[kp] = re[k] + d.dt / p['taue'] * (p['delta'] / pi / p['taue'] + 2.0 * re[k] * ve[k])
                    ve[kp] = ve[k] + d.dt / p['taue'] * (ve[k] ** 2 + p['etae'] - pi2 * (re[k] * p['taue']) ** 2
                                                         - p['taue'] * n['jie'] * sir[kd]
                                                         + p['taue'] * n['jee'] * ser[kd]
                                                         + per.it[kp])
                    ri[kp] = ri[k] + d.dt / p['taui'] * (p['delta'] / p['taui'] / pi + 2.0 * ri[k] * vi[k])
                    vi[kp] = vi[k] + d.dt / p['taui'] * (vi[k] ** 2 + p['etai'] - p['taui'] ** 2 * pi2 * ri[k] ** 2
                                                         + p['taui'] * n['jei'] * ser[kd]
                                                         - p['taui'] * n['jii'] * sir[kd]
                                                         + i['sym'] * per.it[kp])
                if p.get('taude', False):
                    if p['taude'] > 0.001 and p['taudi'] > 0.001:
                        ser[kp] = ser[k] + d.dt / p['taude'] * (-ser[k] + re[kd])
                        sir[kp] = sir[k] + d.dt / p['taudi'] * (-sir[k] + ri[kd])
                    else:
                        ser[kp] = re[kd2] * 1.0
                        sir[kp] = ri[kd2] * 1.0
                else:
                    ser[kp] = re[kd2] * 1.0
                    sir[kp] = ri[kd2] * 1.0

            if math.isnan(rwe[kp]) or math.isnan(rwi[kp]) or math.isnan(re[kp]) or math.isnan(ri[kp]):
                logger.error("Overflow encountered! Change parameters before running a new instance of the simulation.")
                break

        if set(d.systems).intersection({'nf', 'wc-nf', 'all'}):  # NF simulations
            # QIF-NF Equations
            if 'nf' in d.systems:
                # TODO: implement taue and taui !!!
                ser[k2p] = (2.0 * d.ne / d.n * np.dot(exc.c, re[k]) + 2.0 * d.ni / d.n * np.dot(inh.c, ri[k]))
                re[kp] = re[k] + d.dt * (p['delta'] / pi + 2.0 * re[k] * ve[k])
                ve[kp] = ve[k] + d.dt * (ve[k] ** 2 + p['etae'] + ser[k2p] - pi2 * re[k] ** 2 + per.it[kp])
                ri[kp] = ri[k] + d.dt * (p['delta'] / pi + 2.0 * ri[k] * vi[k])
                vi[kp] = vi[k] + d.dt * (vi[k] ** 2 + p['etai'] + ser[k2p] - pi2 * ri[k] ** 2 + i['sym'] * per.it[kp])
            # WC-NF Equations.
            if 'wc-nf' in d.systems:
                ser[k2p] = (2.0 * d.ne / d.n * np.dot(exc.c, rwe[k]) + 2.0 * d.ni / d.n * np.dot(inh.c, rwi[k]))
                rwe[kp] = rwe[k] + d.dt / p['taue'] * (
                    -rwe[k] + sigmoid_qif(p['etae'] + p['taue'] * ser[k2p] + per.it[kp], p['taue'], p['delta']))
                rwi[kp] = rwi[k] + d.dt / p['taui'] * (
                    -rwi[k] + sigmoid_qif(p['etai'] + p['taui'] * ser[k2p] + i['sym'] * per.it[kp], p['taui'],
                                          p['delta']))

            if math.isnan(rwe[kp, 0]) or math.isnan(rwi[kp, 0]) or math.isnan(re[kp, 0]) or math.isnan(ri[kp, 0]):
                logger.error("Overflow encountered! Change parameters before running a new instance of the simulation.")
                break

        # Spiking neurons
        if set(d.systems).intersection(
                {'qif-fr', 'if-fr', 'eif-fr', 'qif-nf', 'if-nf', 'eif-nf', 'all'}):  # Spiking neurons
            tsyp = tstep % d.t_syn
            if d.spk_time_e or d.spk_time_i:
                tskp_e = tstep % d.spk_time_e
                tskp_i = tstep % d.spk_time_i
                tsk_e = (tstep + d.spk_time_e - 1) % d.spk_time_e
                tsk_i = (tstep + d.spk_time_i - 1) % d.spk_time_i
            else:
                tsk_e = tsk_i = tskp_e = tskp_i = 0

            if o['D'] == 'noise':
                noise_E = p['taue'] / d.dt * p['delta'] * np.sqrt(d.dt / p['taue']) * noise(d.Ne)
                noise_I = p['taui'] / d.dt * p['delta'] * np.sqrt(d.dt / p['taui']) * noise(d.Ni)

            if set(d.systems).intersection(
                    {'qif-fr', 'if-fr', 'eif-fr', 'all'}):  # QIF or IF population simulation (FR)
                sep = np.dot(d.spk_e, d.a_tau[:, tsyp]).mean()
                sip = np.dot(d.spk_i, d.a_tau[:, tsyp]).mean()
                if p.get('taude', False):
                    if p['taude'] > 0.001 and p['taudi'] > 0.001:
                        sye[kp] = sye[k] + d.dt / p['taude'] * (-sye[k] + sep)
                        syi[kp] = syi[k] + d.dt / p['taudi'] * (-syi[k] + sip)
                    else:
                        sye[kp] = sep * 1.0
                        syi[kp] = sip * 1.0
                else:
                    sye[kp] = sep * 1.0
                    syi[kp] = sip * 1.0

                if set(d.systems).intersection({'qif-fr', 'all'}):  # QIF population simulation (FR)
                    if d.cond:
                        d.m_e = qifint_cond(d.m_e, d.m_e['v'], d.m_e['t'],
                                            d.eta_e + noise_E + per.it[kp] * np.ones(d.Ne),
                                            p['taue'] * p['ae'] * n['jee'] * sye[kp],
                                            p['taue'] * p['ai'] * n['jie'] * syi[kp],
                                            temps, d.Ne, d.dt, p['taue'], d.vpeak, d.rte, d.tpeak_e,
                                            p['reverse'], p['reversi'])
                        d.m_i = qifint_cond(d.m_i, d.m_i['v'], d.m_i['t'],
                                            d.eta_i + noise_I + i['sym'] * per.it[kp] * np.ones(d.Ni),
                                            p['taui'] * p['ae'] * n['jei'] * sye[kp],
                                            p['taui'] * p['ai'] * n['jii'] * syi[kp],
                                            temps, d.Ni, d.dt, p['taui'], d.vpeak, d.rti, d.tpeak_i,
                                            p['reverse'], p['reversi'])
                    else:
                        d.m_e = qifint_fr(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E,
                                          p['taue'] * n['jee'] * sye[kp] - p['taue'] * n['jie'] * syi[kp] + per.it[kp],
                                          temps, d.Ne,
                                          d.dt,
                                          p['taue'], d.vpeak, d.rte, d.tpeak_e)
                        d.m_i = qifint_fr(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                          p['taui'] * n['jei'] * sye[kp] - p['taui'] * n['jii'] * syi[kp] + i['sym'] *
                                          per.it[kp],
                                          temps,
                                          d.Ni, d.dt, p['taui'], d.vpeak, d.rti, d.tpeak_i)

                if set(d.systems).intersection({'if-fr', 'all'}):  # LIF population simulation (FR)
                    d.m_e = ifint_fr(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E,
                                     p['taue'] * n['jee'] * sye[kp] - p['taue'] * n['jie'] * syi[kp] + per.it[kp],
                                     temps, d.Ne,
                                     d.dt,
                                     p['taue'], p['vpeak'], p['vreset'], p['revers'], p['rperiod'], d.tpeak_e)
                    d.m_i = ifint_fr(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                     p['taui'] * n['jei'] * sye[kp] - p['taui'] * n['jii'] * syi[kp] + i['sym'] *
                                     per.it[kp],
                                     temps,
                                     d.Ni, d.dt, p['taui'], p['vpeak'], p['vreset'], p['revers'], p['rperiod'],
                                     d.tpeak_i)

                if set(d.systems).intersection({'eif-fr', 'all'}):  # LIF population simulation (FR)
                    d.m_e = eifint_fr(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E,
                                      p['taue'] * n['jee'] * sye[kp] - p['taue'] * n['jie'] * syi[kp] + per.it[kp],
                                      temps, d.Ne,
                                      d.dt, p['taue'], p['vpeak'], p['vreset'], p['revers'],
                                      p['rperiod'], d.tpeak_e, p['sharp'], p['rheo'])
                    d.m_i = eifint_fr(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                      p['taui'] * n['jei'] * sye[kp] - p['taui'] * n['jii'] * syi[kp] + i['sym'] *
                                      per.it[kp],
                                      temps, d.Ni, d.dt, p['taui'], p['vpeak'], p['vreset'], p['revers'],
                                      p['rperiod'], d.tpeak_i, p['sharp'], p['rheo'])
                # Auxiliary matrices for FR computation
                if d.spk_time_e or d.spk_time_i:
                    d.spk_e_mod[:, tsk_e] = 1 * d.m_e['s']
                    d.spk_e[:, tsyp] = 1 * d.spk_e_mod[:, tskp_e]
                    d.spk_i_mod[:, tsk_i] = 1 * d.m_i['s']
                    d.spk_i[:, tsyp] = 1 * d.spk_i_mod[:, tskp_i]
                else:
                    d.spk_e[:, tsyp] = 1 * d.m_e['s']
                    d.spk_i[:, tsyp] = 1 * d.m_i['s']

                dat.fr.spikes_e[:, tstep % dat.fr.sld_steps] = 1 * d.spk_e[:, tsyp]
                dat.fr.spikes_i[:, tstep % dat.fr.sld_steps] = 1 * d.spk_i[:, tsyp]
                dat.fr.firing_rate(tstep, temps, var)

            if set(d.systems).intersection({'qif-nf', 'if-nf', 'eif-nf', 'all'}):  # NF population simulations: common synaptic fields
                sep = (1.0 / d.Ne) * np.dot(exc.c, np.dot(d.aux['e'], np.dot(d.spk_e, d.a_tau[:, tsyp])))
                sip = (1.0 / d.Ni) * np.dot(inh.c, np.dot(d.aux['i'], np.dot(d.spk_i, d.a_tau[:, tsyp])))
                s = sep + sip

                if set(d.systems).intersection({'qif-nf', 'all'}):  # QIF population simulation (NF)
                    d.m_e = qifint_nf(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E,
                                      p['taue'] * s + per.it[kp], temps, d.Ne, d.dNe, d.dt,
                                      p['taue'], d.vpeak, d.rte, d.tpeak_e)
                    d.m_i = qifint_nf(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                      p['taui'] * s + i['sym'] * per.it[kp], temps, d.Ne, d.dNe, d.dt,
                                      p['taui'], d.vpeak, d.rti, d.tpeak_i)

                if set(d.systems).intersection({'if-nf', 'all'}):  # LIF population simulation (NF)
                    d.m_e = ifint_nf(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E,
                                     p['taue'] * s + per.it[kp], temps, d.Ne, d.dNe, d.dt,
                                     p['taue'], p['vpeak'], p['vreset'], p['revers'], p['rperiod'], d.tpeak_e)
                    d.m_i = ifint_nf(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                     p['taui'] * s + i['sym'] * per.it[kp], temps, d.Ne, d.dNe, d.dt,
                                     p['taui'], p['vpeak'], p['vreset'], p['revers'], p['rperiod'], d.tpeak_i)

                if set(d.systems).intersection({'eif-nf', 'all'}):  # EIF population simulation (NF)
                    d.m_e = eifint_nf(d.m_e, d.m_e['v'], d.m_e['t'], d.eta_e + noise_E, p['taue'] * s + per.it[kp],
                                      temps, d.Ne,
                                      d.dNe, d.dt, p['taue'], p['vpeak'], p['vreset'], p['revers'], p['rperiod'],
                                      d.tpeak_e,
                                      p['sharp'], p['rheo'])
                    d.m_i = eifint_nf(d.m_i, d.m_i['v'], d.m_i['t'], d.eta_i + noise_I,
                                      p['taui'] * s + i['sym'] * per.it[kp],
                                      temps, d.Ne, d.dNe, d.dt, p['taui'], p['vpeak'], p['vreset'], p['revers'],
                                      p['rperiod'],
                                      d.tpeak_i, p['sharp'], p['rheo'])

                # Auxiliary matrices for FR computation
                if d.spk_time_e or d.spk_time_i:
                    d.spk_e_mod[:, tsk_e] = 1 * d.m_e['s']
                    d.spk_e[:, tsyp] = 1 * d.spk_e_mod[:, tskp_e]
                    d.spk_i_mod[:, tsk_i] = 1 * d.m_i['s']
                    d.spk_i[:, tsyp] = 1 * d.spk_i_mod[:, tskp_i]
                else:
                    d.spk_e[:, tsyp] = 1 * d.m_e['s']
                    d.spk_i[:, tsyp] = 1 * d.m_i['s']

                dat.fr.spikes_e[:, tstep % dat.fr.sld_steps] = 1 * d.spk_e[:, tsyp]
                dat.fr.spikes_i[:, tstep % dat.fr.sld_steps] = 1 * d.spk_i[:, tsyp]
                dat.fr.firing_rate(tstep, temps, var, dat.aux)

            # Recording of spike times for raster plotting (two versions: snapshot and dynamic plotting)
            if r['start'] and tstep % r['rate'] == 0:
                # For the dynamic plot we can use a simple dictionary with time as the key and an array of neuron indices.
                if r['dynamic']:
                    if q_out.qsize() < 10:
                        if r['pop']:
                            pop = r['pop']
                            q_out.put({'t': temps, 'sp': d.m_e[d.pope[pop]][d.m_e[d.pope[pop]]['s'] == 1]['i']})
                        else:
                            q_out.put({'t': temps, 'sp': d.m_e[d.m_e['s'] == 1]['i']})
                # For the snapshot we use a limited size dictionary with times as keys.
                else:
                    if not s_pop:
                        if d.nf and r['pop']:
                            s_pop = random.sample(range(d.dNe * r['pop'], d.dNe * (r['pop'] + 1)), pop_max)
                        else:
                            s_pop = random.sample(range(d.Ne), pop_max)
                    s_pop.sort()
                    raster[temps] = d.m_e[s_pop][d.m_e[s_pop]['s'] == 1]['i']

            # Measure firing rate of individual neurons
            if c.get('neuronf', False):
                if len(spikes_e) > 1:
                    spikes_e += d.m_e['s']
                    spikes_i += d.m_i['s']
                else:
                    tfreq = tstep
                    spikes_e = d.m_e['s'][:] * np.float64(1.0)
                    spikes_i = d.m_i['s'][:] * np.float64(1.0)

        pbar.update(tstep)

        # Perturbation management
        if per.active and tstep % int(p['upd']) == 0:
            per.check(tstep)

        if c['exit'] or c['stop']:
            break

        temps += d.dt
        tstep += 1

        if tstep % d.nsteps == 0:
            var['cycle'].value += 1
        var['tstep'].value = tstep
        var['temps'].value = temps
        if not d.nf:
            var['it'][kp] = per.it[kp]

        # We get the data from the GUI
        if q_in and tstep % int(p['upd']) == 0:
            try:
                incoming = q_in.get_nowait()
                for key in incoming.keys():
                    logger.debug("Updating data dictionary.")
                    o[key].update(incoming[key])
                    # Passing spiking neuron data to the Main GUI
                    if incoming[key].get('pause', False):
                        if o.get('sp', False):
                            sp_dict = {'opts': o, 'm_e': d.m_e.copy(), 'm_i': d.m_i.copy(),
                                       'spk_e': np.roll(d.spk_e, -(tsyp + 1)), 'spk_i': np.roll(d.spk_i, -(tsyp + 1))}
                            sp_dict['m_e']['t'] -= (temps - d.dt)
                            sp_dict['m_i']['t'] -= (temps - d.dt)
                            try:
                                sp_qif = {'spk_mod_e': np.roll(d.spk_e_mod, -(tsk_e + 1)),
                                          'spk_mod_i': np.roll(d.spk_i_mod, -(tsk_i + 1))}
                                sp_dict.update(sp_qif)
                            except Exception:
                                # The modified spike buffers may not exist in this setup.
                                pass
                            q_out.put(sp_dict)
                            del sp_dict
                        else:
                            q_out.put({'opts': o})
                    # Passing raster data to the main GUI
                    if incoming[key].get('update', False):
                        q_out.put(raster)
                    # Receiving order to save voltage data
                    if incoming[key].get('vsnapshot', False):
                        np.save('volt_distribution_%f' % (d.dt * kp * d.faketau), d.m_e['v'])
                    if incoming[key].get('neuronf', False):
                        if tfreq == 0:
                            logger.info('Measuring frequencies of individual neurons...')
                            spikes_e = []
                            spikes_i = []
                        else:
                            logger.info('Measure of frequencies done.')
                            mylog.info(0, True)
                            c['neuronf'] = False
                            dtfreq = tstep - 1 - tfreq
                            tfreq = 0
                            freqs_e = np.array(spikes_e) / dtfreq
                            freqs_i = np.array(spikes_i) / dtfreq
                            np.save('nfreqs.npy', {'time': dtfreq, 'freqse': freqs_e, 'freqsi': freqs_i})
                    if o.get('sp', False):
                        if isinstance(incoming[key].get('delta', False), float) \
                                or isinstance(incoming[key].get('etai', False), float) \
                                or isinstance(incoming[key].get('etae', False), float):
                            d.eta_e = Data.external_currents(p['etae'], p['delta'], d.Ne, n=d.n,
                                                             distribution=o['D'])
                            d.eta_i = Data.external_currents(p['etai'], p['delta'], d.Ni, n=d.n,
                                                             distribution=o['D'])
                        if incoming[key].get('gamma', False) \
                                or incoming[key].get('reversi', False) or incoming[key].get('reverse', False):
                            if d.cond:
                                d.rev_e = Data.external_currents(p['reverse'], p['gamma'], d.Ne, n=d.n,
                                                                 distribution=o['G'])
                                d.rev_i = Data.external_currents(p['reversi'], p['gamma'], d.Ni, n=d.n,
                                                                 distribution=o['G'])
                # Updating the objects' properties (perturbation, connectivity)
                for obj in objects:
                    if obj:
                        if obj.name in incoming.keys():
                            logger.debug("Updating %s" % obj.name)
                            obj.update(o[obj.name], tstep=tstep)
            except Queue.Empty:
                pass
            except KeyError:
                logger.error("KeyError when getting or sending objects through the queue.")

    # Finish pbar
    pbar.finish()
    temps -= d.dt
    tstep -= 1
    # Stop the timer
    logger.info('Total time: {}.'.format(timer() - time1))
    # Synchronize data object
    while not q_out.empty():
        q_out.get_nowait()
    q_out.put({'opts': o})
    if o.get('sp', False):
        sp_dict = {'opts': o, 'm_e': d.m_e.copy(), 'm_i': d.m_i.copy(),
                   'spk_e': np.roll(d.spk_e, -(tsyp + 1)), 'spk_i': np.roll(d.spk_i, -(tsyp + 1))}
        sp_dict['m_e']['t'] -= (temps - d.dt)
        sp_dict['m_i']['t'] -= (temps - d.dt)
        try:
            sp_qif = {'spk_mod_e': np.roll(d.spk_e_mod, -(tsk_e + 1)),
                      'spk_mod_i': np.roll(d.spk_i_mod, -(tsk_i + 1))}
            sp_dict.update(sp_qif)
        except Exception:
            # The modified spike buffers may not exist in this setup.
            pass
        q_out.put(sp_dict)
        del sp_dict
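The GUI snapshot above reorders the circular spike buffer with np.roll so the oldest column comes first. A minimal sketch of that pattern, assuming a toy buffer and the write cursor tsyp used above; note the explicit axis=1, since np.roll without an axis rolls the flattened array:

import numpy as np

buf = np.zeros((1, 5))              # circular buffer: one row per neuron
for tstep in range(7):              # write 7 samples into 5 slots
    tsyp = tstep % buf.shape[1]
    buf[:, tsyp] = tstep
# shift so columns run from oldest to newest
chronological = np.roll(buf, -(tsyp + 1), axis=1)
print(chronological)                # [[2. 3. 4. 5. 6.]]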
Exemple #11
0
OBS = getOut(TEST)
print OBS

os.system('mkdir -p '+STEM+'_EVAL')
os.system('mkdir -p '+STEM+'_RESULT')

def removeScore(comboLine):
    return ' '.join(comboLine.strip().split(' ')[1:])

c = 1
GFILE = open(STEM+'_RESULT/RFINAL.result',"w")
GCONFUSION = [0,0,0,0]

if pbar_exists:
    widgets = ["Evaluating combos: ", P.Percentage(), P.Bar()]
    pbar = P.ProgressBar(maxval=100, widgets=widgets).start()

maxacc = 0
maxprec = 0
maxsens = 0
maxspec = 0

#etExec = '/home/kurekaoru/opencog/X/opencog/comboreduct/main/eval-table'
etExec = 'eval-table'

c = 0
for x in MODELS:
    CONFUSION = [0, 0, 0, 0]
    oneliner = open(STEM+'_EVAL/M'+str(c)+'.model', 'w')
    M = removeScore(x)
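The pbar_exists flag above keeps the evaluation usable when the progressbar package is missing. The import side of that guard is not shown in this excerpt; a minimal sketch of the usual idiom, assuming the P alias used above:

try:
    import progressbar as P
    pbar_exists = True
except ImportError:
    pbar_exists = False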
Exemple #12
0
    def finish(self, resume=False, verbose=False, use_pbar=True):
        """Block and complete all threads in queue.
        
        Args:
            resume (bool, optional): Resume spooling after finishing.
            verbose (bool, optional): Report progress towards queue completion.
            use_pbar (bool, optional): Graphically display progress towards queue completion.
        """
        # Stop existing spools
        self.background_spool = False
        self.dirty.set()

        # By default, we don't use a callback.
        cb = None

        if verbose:
            print(self)

        # Progress bar management, optional.
        self._pbar_max = self.numRunningThreads + len(self.queue)
        if use_pbar and self._pbar_max > 0:
            widgets = [
                ("[{n}] ".format(n=self.name) if self.name else ''), progressbar.Percentage(),
                ' ', progressbar.SimpleProgress(format='%(value_s)s of %(max_value_s)s'),
                ' ', progressbar.Bar(),
                ' ', DynamicProgressString(name="state"),
                ' ', progressbar.Timer(),
                ' ', progressbar.AdaptiveETA(),
            ]
            self.progbar = progbar = progressbar.ProgressBar(max_value=self._pbar_max, widgets=widgets, redirect_stdout=True)

            def updateProgressBar():
                # Update progress bar.
                q = (len(self.queue) if self.queue else 0)
                nrt = self.numRunningThreads
                progress = (self._pbar_max - (nrt + q))
                state = "[Spool: Q: {q:2}, R: {nrt:2}/{quota}]".format(quota=self.quota, **locals())
                progbar.max_value = self._pbar_max
                progbar.update(progress, state=state)
            cb = updateProgressBar

        # assert not self.spoolThread.isAlive, "Background loop did not terminate"
        # Create a spoolloop, but block until it deploys all threads.
        execif(cb)
        while (self.queue and len(self.queue) > 0) or (self.numRunningThreads > 0):
            self.dirty.wait()
            self.doSpool(verbose=verbose)        
            self.dirty.clear()
            execif(cb)

        assert len(self.queue) == 0, "Finished without deploying all threads"
        assert self.numRunningThreads == 0, "Finished without finishing all threads"
        
        if cb:
            progbar.finish()

        if resume:
            self.queue.clear()  # Create a fresh queue
            self.start()

        if verbose:
            print(self)
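finish() funnels every progress update through execif(cb) so the callback stays optional. execif is presumably defined elsewhere in this module; a minimal sketch consistent with how it is called here:

def execif(fn, *args, **kwargs):
    # Call fn only when it is set; lets cb default to None harmlessly.
    if fn is not None:
        return fn(*args, **kwargs)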
Exemple #13
0
            self.vxgal = numpy.array([])
            self.vygal = numpy.array([])
            self.vzgal = numpy.array([])
            self.xgal = numpy.array([])
            self.ygal = numpy.array([])
            self.z_cos = numpy.array([])
            self.z_obs = numpy.array([])
            self.zgal = numpy.array([])

    Lagos = lightcone()

    ###  Initializing progress bar
    widgets = [
        '  Running: ',
        progressbar.Percentage(), ' ',
        progressbar.Bar(marker=progressbar.RotatingMarker()), ' ',
        progressbar.ETA(), ' ',
        progressbar.FileTransferSpeed()
    ]
    pbar = progressbar.ProgressBar(widgets=widgets,
                                   maxval=len(files) + 1).start()

    for i_file in range(len(files)):

        pbar.update(i_file + 1)

        fopen = h5py.File(files[i_file], "r")
        try:
            dset = fopen.get('Data')
            Lagos.BCDM = numpy.append(Lagos.BCDM, dset["BCDM"].value)
            Lagos.BoT = numpy.append(Lagos.BoT, dset["BoT"].value)
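Growing Lagos.BCDM with numpy.append reallocates the whole array on every file, which is quadratic in the number of files. A sketch of the usual alternative, collecting per-file arrays and concatenating once (same h5py file list and 'Data'/'BCDM' keys as above; [()] is the modern replacement for the deprecated .value accessor):

import h5py
import numpy

chunks = []
for path in files:
    with h5py.File(path, "r") as fopen:
        chunks.append(fopen["Data"]["BCDM"][()])  # read the full dataset
BCDM = numpy.concatenate(chunks)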
Exemple #14
0
def mvpa(conf, paths):
    "Runs the MVPA analysis"

    group_conf = ns_aperture.config.get_conf()
    group_paths = ns_aperture.paths.get_group_paths(group_conf)

    cond_info = np.loadtxt(paths.mvpa.cond_info.full(".txt"), int)

    for hemi in ["lh", "rh"]:

        # nodes x runs x blocks
        data = np.load(paths.mvpa.data.full("_" + hemi + ".npy"))

        seed_nodes = np.loadtxt(paths.mvpa.nodes.full("_" + hemi + ".txt"),
                                int)

        acc = np.empty((data.shape[0]))
        acc.fill(np.nan)

        pbar = progressbar.ProgressBar(
            widgets=[progressbar.Percentage(),
                     progressbar.Bar()],
            maxval=seed_nodes.shape[0]).start()

        with open(group_paths.sl_info.full("_" + hemi + ".txt"),
                  "r") as sl_info:

            for (i_seed, (seed_node, node_line)) in enumerate(
                    zip(seed_nodes, sl_info.readlines())):

                pbar.update(i_seed)

                nodes = [int(x) for x in node_line.splitlines()[0].split("\t")]

                assert seed_node in nodes

                i_nodes = []

                for sl_node in nodes:
                    i = np.where(seed_nodes == sl_node)[0]
                    if len(i) > 0:
                        assert len(i) == 1
                        i_nodes.append(i[0])

                if len(i_nodes) > 0:
                    acc[i_seed] = _classify(data[i_nodes, ...], cond_info)

        # save acc
        acc_path = paths.mvpa.acc.full("_" + hemi + ".txt")
        np.savetxt(acc_path, acc)

        pbar.finish()

        os.chdir(paths.mvpa.base.full())

        # convert to full niml
        cmd = [
            "ConvertDset", "-i_1D", "-input", acc_path, "-node_index_1D",
            paths.mvpa.nodes.full("_" + hemi + ".txt"), "-o_niml", "-prefix",
            paths.mvpa.acc.full("_" + hemi + ".niml.dset"), "-pad_to_node",
            "ld141", "-overwrite"
        ]

        fmri_tools.utils.run_cmd(" ".join(cmd))
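Each searchlight above locates its neighbours with np.where(seed_nodes == sl_node), a linear scan per node. For long node lists, a one-off dictionary returns the same indices in constant time; a sketch with toy values:

import numpy as np

seed_nodes = np.array([10, 42, 7])
index_of = {node: i for i, node in enumerate(seed_nodes)}  # one pass
i_nodes = [index_of[n] for n in (42, 7, 99) if n in index_of]
print(i_nodes)  # [1, 2]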
Exemple #15
0
        page = requests.get(url)  # getting page
    if page.status_code != 200:
        print(topicID + ' has status code ' + str(page.status_code))
        return

    # try:
    #     page = urllib.request.urlopen(url)
    # except HTTPError:
    #     return

    print(topicID)
    f.write(topicID + '\n')
    f.flush()
    os.fsync(f)

widgets = [progressbar.Percentage(), ' ', progressbar.Counter(), ' ', progressbar.Bar(), ' ',
           progressbar.FileTransferSpeed()]


# check_and_write(0)

# pbar = progressbar.ProgressBar(widgets=widgets, max_value=1500000).start()
# counter = 0

# Parallel(n_jobs=20)(delayed(check_and_write)(i) for i in range(1500000))
last_id = 0
if os.path.exists(outputLastFile):
    with open(outputLastFile, 'r') as last_id_f:
        last_id = int(last_id_f.read(100))

for i in range(last_id, 100000):
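The loop above resumes from outputLastFile, but this excerpt cuts off before the matching write. A minimal sketch of the checkpoint side, assuming the same outputLastFile path; save_checkpoint is a hypothetical helper:

def save_checkpoint(last_id):
    # Hypothetical helper: persist progress so the crawl can resume.
    with open(outputLastFile, 'w') as last_id_f:
        last_id_f.write(str(last_id))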
Exemple #16
0
    def scan(self):
        self.pulser_dac_parameters = self.scan_parameters.PlsrDAC
        self.colpr_addr_parameters = self.scan_parameters.Colpr_Addr

        description = np.dtype([
            ('colpr_addr', np.uint32), ('PlsrDAC', np.int32),
            ('voltage', float)
        ])  # output data table description, native NumPy dtype
        data = self.raw_data_file.h5_file.create_table(
            self.raw_data_file.h5_file.root,
            name='plsr_dac_data',
            description=description,
            title='Data from PlsrDAC calibration scan')

        progress_bar = progressbar.ProgressBar(
            widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
            maxval=len(self.pulser_dac_parameters) *
            len(self.colpr_addr_parameters) * self.repeat_measurements,
            term_width=80)
        progress_bar.start()
        progress_bar_index = 0

        for colpr_address in self.colpr_addr_parameters:
            if self.abort_run.is_set():
                break
            self.set_scan_parameters(Colpr_Addr=colpr_address)

            commands = []
            commands.extend(self.register.get_commands("ConfMode"))
            self.register.set_global_register_value("Colpr_Addr",
                                                    colpr_address)
            commands.extend(
                self.register.get_commands("WrRegister", name="Colpr_Addr"))
            commands.extend(self.register.get_commands("RunMode"))
            self.register_utils.send_commands(commands)

            for pulser_dac in self.pulser_dac_parameters:
                if self.abort_run.is_set():
                    break
                self.set_scan_parameters(PlsrDAC=pulser_dac)
                commands = []
                commands.extend(self.register.get_commands("ConfMode"))
                self.register.set_global_register_value("PlsrDAC", pulser_dac)
                commands.extend(
                    self.register.get_commands("WrRegister", name="PlsrDAC"))
                commands.extend(self.register.get_commands("RunMode"))
                self.register_utils.send_commands(commands)

                actual_data = np.zeros(shape=(self.repeat_measurements, ),
                                       dtype=description)
                actual_data['colpr_addr'] = colpr_address
                actual_data["PlsrDAC"] = pulser_dac

                for index in range(self.repeat_measurements):
                    voltage_string = self.dut['Multimeter'].get_voltage()
                    voltage = float(voltage_string.split(',')[0])

                    actual_data['voltage'][index] = voltage
                    #                     logging.info('Measured %.2fV', voltage)
                    progress_bar_index += 1
                    progress_bar.update(progress_bar_index)
                # append data to HDF5 file
                data.append(actual_data)
        progress_bar.finish()
        data.flush()
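Each (colpr_addr, PlsrDAC) point above stores repeat_measurements raw voltages in the HDF5 table. A later analysis step would typically reduce them per DAC setting; a sketch with the same structured dtype and toy values:

import numpy as np

description = np.dtype([('colpr_addr', np.uint32),
                        ('PlsrDAC', np.int32),
                        ('voltage', float)])
rows = np.array([(0, 10, 0.31), (0, 10, 0.33), (0, 20, 0.61)],
                dtype=description)
for dac in np.unique(rows['PlsrDAC']):
    sel = rows['PlsrDAC'] == dac
    print(dac, rows['voltage'][sel].mean())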
Exemple #17
0
                          test_size=config.NUM_TEST_IMAGES,
                          stratify=trainLabels,
                          random_state=42)
trainPaths, testPaths, trainLabels, testLabels = splits

datasets = [('train', trainPaths, trainLabels, config.TRAIN_MX_LIST),
            ('val', valPaths, valLabels, config.VAL_MX_LIST),
            ('test', testPaths, testLabels, config.TEST_MX_LIST)]

R, G, B = [], [], []
for dType, paths, labels, outputPath in datasets:
    print('[INFO] building {}...'.format(outputPath))
    widgets = [
        'Building List: ',
        progressbar.Percentage(), ' ',
        progressbar.Bar(), ' ',
        progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(maxval=len(paths), widgets=widgets).start()

    f = open(outputPath, 'w')
    for i, (path, label) in enumerate(zip(paths, labels)):
        row = '\t'.join([str(i), str(label), path])
        f.write('{}\n'.format(row))
        if dType == 'train':
            image = cv2.imread(path)
            b, g, r = cv2.mean(image)[:3]
            R.append(r)
            G.append(g)
            B.append(b)
        pbar.update(i)
Exemple #18
0
    def __init__(self, body_pose, *args, **kwargs):
        # Necessary Paths
        ## Download these and specify file paths
        protoFile = "Pose-Estimation-Clean-master/models/pose/mpi/pose_deploy_linevec.prototxt"
        weightsFile = "Pose-Estimation-Clean-master/models/pose/mpi/pose_iter_160000.caffemodel"
        global path
        video_path = path
        csv_path = 'body_pose_output.csv'

        # Load the model and the weights
        net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

        # Store the input video specifics
        cap = cv2.VideoCapture(video_path)
        n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        ok, frame = cap.read()
        #(frameHeight, frameWidth) = cap.frame.shape[:2]
        #h = 500
        #w = int((h/frameHeight) * frameWidth)
        h = 500
        w = 890

        # Dimensions for inputing into the model
        inHeight = 368
        inWidth = 368

        # Set up the progressbar
        widgets = [
            "--[INFO]-- Analyzing Video: ",
            progressbar.Percentage(), " ",
            progressbar.Bar(), " ",
            progressbar.ETA()
        ]
        pbar = progressbar.ProgressBar(maxval=n_frames,
                                       widgets=widgets).start()
        p = 0

        data = []
        previous_x, previous_y = [0] * 15, [0] * 15  # one slot per skeleton point

        # Define the output path (the writer itself is created lazily below)
        out_path = 'outputs/out_11.mp4'

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # four-character codec code
        writer = None
        (f_h, f_w) = (h, w)
        zeros = None

        # There are 15 points in the skeleton
        pairs = [
            [0, 1],  # head
            [1, 2],
            [1, 5],  # shoulders
            [2, 3],
            [3, 4],
            [5, 6],
            [6, 7],  # arms
            [1, 14],
            [14, 11],
            [14, 8],  # hips
            [8, 9],
            [9, 10],
            [11, 12],
            [12, 13]
        ]  # legs

        # probability threshold for prediction of the coordinates
        thresh = 0.4

        circle_color, line_color = (0, 255, 255), (0, 255, 0)

        # Start the iteration
        while True:
            ok, frame = cap.read()

            if not ok:
                break

            frame = cv2.resize(frame, (w, h), cv2.INTER_AREA)
            frame_copy = np.copy(frame)

            # Input the frame into the model
            inpBlob = cv2.dnn.blobFromImage(frame_copy,
                                            1.0 / 255, (inWidth, inHeight),
                                            (0, 0, 0),
                                            swapRB=False,
                                            crop=False)
            net.setInput(inpBlob)
            output = net.forward()

            H = output.shape[2]
            W = output.shape[3]

            points = []
            x_data, y_data = [], []

            # Iterate through the returned output and store the data
            for i in range(15):
                probMap = output[0, i, :, :]
                minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
                x = (w * point[0]) / W
                y = (h * point[1]) / H

                if prob > thresh:
                    points.append((int(x), int(y)))
                    x_data.append(x)
                    y_data.append(y)
                else:
                    points.append((0, 0))
                    x_data.append(previous_x[i])
                    y_data.append(previous_y[i])

            for i in range(len(points)):
                cv2.circle(frame_copy, (points[i][0], points[i][1]), 2,
                           circle_color, -1)

            for pair in pairs:
                partA = pair[0]
                partB = pair[1]
                cv2.line(frame_copy,
                         points[partA],
                         points[partB],
                         line_color,
                         1,
                         lineType=cv2.LINE_AA)

            if writer is None:
                writer = cv2.VideoWriter(out_path, fourcc, fps, (f_w, f_h),
                                         True)
                zeros = np.zeros((f_h, f_w), dtype="uint8")

            writer.write(cv2.resize(frame_copy, (f_w, f_h)))

            cv2.imshow('Body pose analysis', frame_copy)

            data.append(x_data + y_data)
            previous_x, previous_y = x_data, y_data

            p += 1
            pbar.update(p)

            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

        # Save the output data from the video in CSV format
        df = pd.DataFrame(data)
        df.to_csv(csv_path, index=False)
        print('save complete')

        pbar.finish()
        cap.release()
        cv2.destroyAllWindows()
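The CSV written above holds, per frame, 15 x-coordinates followed by 15 y-coordinates. A sketch of reading it back into per-joint columns, assuming the same body_pose_output.csv layout (point 0 is the head in the MPI skeleton used above):

import pandas as pd

df = pd.read_csv('body_pose_output.csv')
n_points = 15
xs = df.iloc[:, :n_points]                # x coordinate of each joint
ys = df.iloc[:, n_points:2 * n_points]    # y coordinate of each joint
head_x, head_y = xs.iloc[:, 0], ys.iloc[:, 0]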
Exemple #19
0
def validate(root, client, dbname, password):
    crypto = None
    token = None
    base = os.path.join(root, client)
    cache = CacheDir.CacheDir(base)
    if password:
        crypto = TardisCrypto.TardisCrypto(password, client)
        token = crypto.encryptFilename(client)
    db = TardisDB.TardisDB(os.path.join(base, dbname),
                           token=token,
                           backup=False)
    regen = Regenerate.Regenerator(cache, db, crypto)

    conn = db.conn

    cur = conn.execute("SELECT count(*) FROM CheckSums WHERE IsFile = 1")
    row = cur.fetchone()
    num = row[0]
    print "Checksums: %d" % (num)

    cur = conn.execute(
        "SELECT Checksum FROM CheckSums WHERE IsFile = 1 ORDER BY Checksum ASC"
    )
    pbar = pb.ProgressBar(widgets=[
        pb.Percentage(), ' ',
        pb.Counter(), ' ',
        pb.Bar(), ' ',
        pb.ETA(), ' ',
        pb.Timer()
    ],
                          maxval=num)
    pbar.start()

    row = cur.fetchone()
    i = 1
    while row is not None:
        pbar.update(i)
        i += 1
        try:
            checksum = row['Checksum']
            if checksum not in checked:
                try:
                    f = regen.recoverChecksum(checksum)
                    if f:
                        m = hashlib.md5()
                        d = f.read(128 * 1024)
                        while d:
                            m.update(d)
                            d = f.read(128 * 1024)
                        res = m.hexdigest()
                        if res != checksum:
                            print "Checksums don't match.  Expected: %s, result %s" % (
                                checksum, res)
                            checked[checksum] = 0
                            output.write(checksum + '\n')
                            output.flush()
                        else:
                            checked[checksum] = 1
                            valid.write(checksum + "\n")
                except Exception as e:
                    print "Caught exception processing %s: %s" % (checksum,
                                                                  str(e))
                    output.write(checksum + '\n')
                    output.flush()

            row = cur.fetchone()
        except sqlite3.OperationalError as e:
            print "Caught operational error.  DB is probably locked.  Sleeping for a bit"
            time.sleep(90)
    pbar.finish()
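The chunked MD5 re-hash above is worth keeping as a helper, since it never loads a recovered file fully into memory. A sketch of the same pattern factored out:

import hashlib

def stream_md5(fileobj, chunk_size=128 * 1024):
    # Hash a file-like object in fixed-size chunks.
    m = hashlib.md5()
    d = fileobj.read(chunk_size)
    while d:
        m.update(d)
        d = fileobj.read(chunk_size)
    return m.hexdigest()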
Exemple #20
0
    def _execute_plan(self, created_plan, dryrun):
        """Execute a created plan."""
        self._action_successful = None
        if created_plan.status_int == 204:
            self._action_successful = True
            return 'GLU Console message: %s' % (created_plan.status.split(
                ' ', 1)[-1])

        url2uri_pat = r'https?://[-.:\w]+/(?:.*?/)?%s/' % self.uri_path

        # unique identifier for the plan just created
        plan_url = created_plan['location']
        plan_url = re.sub(url2uri_pat, '', plan_url)
        logger.debug('plan url = %s', plan_url)

        # inspect execution plan here, if you need
        exec_plan = self._do_request(plan_url, 'GET')
        logger.debug('body = %s', exec_plan.body)

        if dryrun:
            self._action_successful = True
            return exec_plan.body

        # execute the plan
        plan_url += '/execution'
        logger.info('executing plan: %s', plan_url)
        plan_status = self._do_request(plan_url, 'POST')

        # check status of plan execution
        status_url = plan_status['location']
        status_url = re.sub(url2uri_pat, '', status_url)
        logger.info('status url = %s', status_url)

        # wait until plan is 100% executed.
        completed = re.compile(r'^100')

        if use_progressbar:
            widgets = [
                ' ',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='[', right=']'), ' ',
                progressbar.ETA(), ' '
            ]

            progress = progressbar.ProgressBar(widgets=widgets, maxval=100)
            progress.start()

        while True:
            progress_status = self._do_request(status_url, 'HEAD')
            complete_status = progress_status['x-glu-completion']
            percent_complete = re.split(':', complete_status)

            if not completed.match(complete_status):
                if use_progressbar:
                    progress.update(int(percent_complete[0]))
                else:
                    logger.info('InProgress: %s%% complete',
                                percent_complete[0])

            else:
                if use_progressbar:
                    progress.finish()
                else:
                    logger.info('Completed : %s', complete_status)

                break

            time.sleep(2)

        self._action_successful = complete_status.startswith('100:COMPLETED')
        return complete_status
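The polling loop above keys off an x-glu-completion header of the form '<percent>:<STATE>'. A small sketch of parsing that header in one place, under the same format assumption:

def parse_completion(header):
    # 'x-glu-completion' looks like '47:IN_PROGRESS' or '100:COMPLETED'.
    percent, _, state = header.partition(':')
    return int(percent), state

print(parse_completion('100:COMPLETED'))  # (100, 'COMPLETED')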
Exemple #21
0
def train_model(train_trees, val_trees, labels, embeddings, embedding_lookup,
                opt):
    max_acc = 0.0
    logdir = opt.model_path
    batch_size = opt.train_batch_size
    epochs = opt.niter
    num_feats = len(embeddings[0])

    random.shuffle(train_trees)

    nodes_node, children_node, codecaps_node = network.init_net_treecaps(
        num_feats, len(labels))

    codecaps_node = tf.identity(codecaps_node, name="codecaps_node")

    out_node = network.out_layer(codecaps_node)
    labels_node, loss_node = network.loss_layer(codecaps_node, len(labels))

    optimizer = RAdamOptimizer(opt.lr)
    train_step = optimizer.minimize(loss_node)

    ### init the graph
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            saver.restore(sess, ckpt.model_checkpoint_path)
            for i, var in enumerate(saver._var_list):
                print('Var {}: {}'.format(i, var))

    checkfile = os.path.join(logdir, 'tree_network.ckpt')

    print("Begin training..........")
    num_batches = len(train_trees) // batch_size + (
        1 if len(train_trees) % batch_size != 0 else 0)
    for epoch in range(1, epochs + 1):
        bar = progressbar.ProgressBar(maxval=len(train_trees),
                                      widgets=[
                                          progressbar.Bar('=', '[', ']'), ' ',
                                          progressbar.Percentage()
                                      ])
        bar.start()
        for i, batch in enumerate(
                sampling.batch_samples(
                    sampling.gen_samples(train_trees, labels, embeddings,
                                         embedding_lookup), batch_size)):
            nodes, children, batch_labels = batch
            step = (epoch - 1) * num_batches + i * batch_size

            if not nodes:
                continue
            _, err, out = sess.run(
                [train_step, loss_node, out_node],
                feed_dict={
                    nodes_node: nodes,
                    children_node: children,
                    labels_node: batch_labels
                })
            bar.update(min((i + 1) * batch_size, len(train_trees)))
        bar.finish()

        correct_labels = []
        predictions = []
        logits = []
        for batch in sampling.batch_samples(
                sampling.gen_samples(val_trees, labels, embeddings,
                                     embedding_lookup), 1):
            nodes, children, batch_labels = batch
            output = sess.run([out_node],
                              feed_dict={
                                  nodes_node: nodes,
                                  children_node: children
                              })
            correct_labels.append(np.argmax(batch_labels))
            predictions.append(np.argmax(output))
            logits.append(output)

        target_names = list(labels)
        acc = accuracy_score(correct_labels, predictions)
        if (acc > max_acc):
            max_acc = acc
            saver.save(sess, checkfile)
            np.save(opt.model_path + '/logits', np.array(logits))
            np.save(opt.model_path + '/correct', np.array(correct_labels))

        print('Epoch', str(epoch), 'Accuracy:', acc, 'Max Acc: ', max_acc)
        csv_log.write(str(epoch) + ',' + str(acc) + ',' + str(max_acc) + '\n')

    print("Finish all iters, storring the whole model..........")
Exemple #22
0
        # if the results do not match
        else:
            print("Query: " + str(each))
            print("Intervals: " + str(arr))
    # returns the stats
    return [(acc / queryNum) * 100, sum(bruteTime), sum(segmentTime)]


if __name__ == "__main__":
    n = 20000  # number of different tests
    err = 0  # number of errors
    bruteForceTime = []  # storing the time stats
    segmentTreeTime = []  # storing the time stats
    # instantiating the progress bar
    bar = progressbar.ProgressBar(
        maxval=n,
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    print("Running " + str(n) + " Randomized Tests...")
    bar.start()
    # running for values of n
    for i in range(1, n, 50):
        bar.update(i + 1)
        var = randomTest(i)
        acc = var[0]
        bruteTime = var[1]
        segmentTime = var[2]
        bruteForceTime.append(bruteTime)
        segmentTreeTime.append(segmentTime)
        # if the results for 100 queries do not match
        if acc != 100:
            print("Something Went Wrong!!!")
            err = 1
Exemple #23
0
    def search_query(self):
        @retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
        def next_scroll(scroll_id):
            return self.es_conn.scroll(scroll=self.scroll_time, scroll_id=scroll_id)
        search_args = dict(
            index=','.join(self.opts.index_prefixes),
            scroll=self.scroll_time,
            size=self.opts.scroll_size,
            terminate_after=self.opts.max_results
        )

        if self.opts.doc_types:
            search_args['doc_type'] = self.opts.doc_types

        if self.opts.query.startswith('@'):
            query_file = self.opts.query[1:]
            if os.path.exists(query_file):
                with open(query_file, 'r') as f:
                    self.opts.query = f.read()
            else:
                print('No such file: %s' % query_file)
                exit(1)
        if self.opts.raw_query:
            try:
                query = json.loads(self.opts.query)
            except ValueError as e:
                print('Invalid JSON syntax in query. %s' % e)
                exit(1)
            search_args['body'] = query
        else:
            query = self.opts.query if not self.opts.tags else '%s AND tags:%s' % (
                self.opts.query, '(%s)' % ' AND '.join(self.opts.tags))
            search_args['q'] = query

        if '_all' not in self.opts.fields:
            search_args['_source_include'] = ','.join(self.opts.fields)
            self.csv_headers.extend([field for field in self.opts.fields if '*' not in field])

        if self.opts.debug_mode:
            print('Using these indices: %s' % ', '.join(self.opts.index_prefixes))
            print('Query[%s]: %s' % (('Query DSL', json.dumps(query)) if self.opts.raw_query else ('Lucene', query)))
            print('Output field(s): %s' % ', '.join(self.opts.fields))

        res = self.es_conn.search(**search_args)

        self.num_results = res['hits']['total']

        print('Found %s results' % self.num_results)
        if self.opts.debug_mode:
            print(json.dumps(res))

        if self.num_results > 0:
            open(self.opts.output_file, 'w').close()
            open(self.tmp_file, 'w').close()

            hit_list = []
            total_lines = 0

            widgets = ['Run query ',
                       progressbar.Bar(left='[', marker='#', right=']'),
                       progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
                       progressbar.Percentage(),
                       progressbar.FormatLabel('] [%(elapsed)s] ['),
                       progressbar.ETA(), '] [',
                       progressbar.FileTransferSpeed(unit='docs'), ']'
                       ]
            bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()

            while total_lines != self.num_results:
                if res['_scroll_id'] not in self.scroll_ids:
                    self.scroll_ids.append(res['_scroll_id'])

                if not res['hits']['hits']:
                    print('Scroll[%s] expired (multiple reads?). Saving loaded data.' % res['_scroll_id'])
                    break
                for hit in res['hits']['hits']:
                    total_lines += 1
                    bar.update(total_lines)
                    hit_list.append(hit)
                    if len(hit_list) == FLUSH_BUFFER:
                        self.flush_to_file(hit_list)
                        hit_list = []
                    if self.opts.max_results:
                        if total_lines == self.opts.max_results:
                            self.flush_to_file(hit_list)
                            print('Hit max result limit: %s records' % self.opts.max_results)
                            return
                res = next_scroll(res['_scroll_id'])
            self.flush_to_file(hit_list)
            bar.finish()
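search_query() collects every scroll id in self.scroll_ids; each one pins server-side resources until the scroll timeout. A sketch of releasing them explicitly, assuming the elasticsearch-py client used above:

def clear_scrolls(self):
    # Free server-side scroll contexts instead of waiting for expiry.
    for scroll_id in self.scroll_ids:
        try:
            self.es_conn.clear_scroll(scroll_id=scroll_id)
        except elasticsearch.exceptions.NotFoundError:
            pass  # already expired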
Exemple #24
0
    def __backpropagation_loop(self):
        # backpropagation loop
        # epoch count starts at one
        epoch = 1
        training_errors = []
        repeat = True
        target_training_error_reached = False

        if self.params['validating']:
            validation_errors = []
            validation_error_best = 1000.0
            training_error_best = 0.0
            epoch_best = 0

        # initialise progress bar for console
        progress_bar = progressbar.ProgressBar(
            maxval=self.params['max_epochs'],
            widgets=[progressbar.Bar(
                '=', '[', ']'), ' ', progressbar.Percentage()])
        progress_bar.start()
        while repeat:
            training_error = 0.0
            progress_bar.update(epoch)

            for pattern in self.training_patterns:
                # load pattern
                input_pattern = pattern[:self.params['input_dimensions']]
                # set bias 'output'
                outputs_l_j = mlp_functions.initialise_bias(self.params)
                # add input pattern to 'output' of layer 0
                outputs_l_j[0].extend(input_pattern)

                # forward pass
                outputs_l_j = mlp_functions.forward_pass(
                    self.params, self.neurons_l, self.weights_l_i_j,
                    outputs_l_j)

                # update training_error
                output_pattern = pattern[self.params['input_dimensions']:
                                         self.last_output]
                teacher_i = []
                # account for i = 0
                teacher_i.append(None)
                teacher_i.extend(output_pattern)

                training_error = mlp_functions.update_ms_error(
                    self.neurons_l, training_error, teacher_i, outputs_l_j)

                # calculate errors
                errors_l_i = mlp_functions.calculate_errors(
                    self.params, self.neurons_l, self.weights_l_i_j,
                    teacher_i, outputs_l_j)

                # update weights
                self.weights_l_i_j = mlp_functions.update_weights(
                    self.params, self.neurons_l, self.weights_l_i_j,
                    errors_l_i, outputs_l_j)

            # calculate rms training error
            training_error = mlp_functions.calculate_rms_error(
                self.params['output_function'],
                training_error,
                self.neurons_l[-1],
                len(self.training_patterns)
            )

            # write out epoch training_error
            training_errors.append(training_error)

            # Write out weights and errors if specified
            if self.params['save_network']:
                if epoch % self.params['save_network_resolution'] == 0:
                    # append results to file
                    headers = (['epoch'] +
                               ['weight_%s_%s_%s' % (l+1, i+1, j)
                                for l in range(len(self.weights_l_i_j[1:]))
                                for i in range(len(self.weights_l_i_j[l+1][1:]))
                                for j in range(len(self.weights_l_i_j[l+1][i+1]))] +
                               ['error_%s_%s' % (l+1, i+1)
                                for l in range(len(errors_l_i[1:]))
                                for i in range(len(errors_l_i[l+1][1:]))])

                    result = [epoch]
                    result.extend(
                        [j for l in range(len(self.weights_l_i_j[1:]))
                         for i in range(len(self.weights_l_i_j[l+1][1:]))
                         for j in self.weights_l_i_j[l+1][i+1]])
                    result.extend(
                        [i for l in range(len(errors_l_i[1:]))
                         for i in errors_l_i[l+1][1:]])

                    io_functions.write_result_row(
                        'results/%s_weights.csv' % self.results_filename, headers, result)

            if self.params['validating']:
                validation_error = 0.0

                for pattern in self.validation_patterns:
                    # load pattern
                    input_pattern = pattern[:self.params['input_dimensions']]
                    # set bias 'output'
                    outputs_l_j = mlp_functions.initialise_bias(self.params)
                    # add input pattern to 'output' of layer 0
                    outputs_l_j[0].extend(input_pattern)

                    # forward pass
                    outputs_l_j = mlp_functions.forward_pass(
                        self.params, self.neurons_l, self.weights_l_i_j,
                        outputs_l_j)

                    # update validation error
                    output_pattern = pattern[self.params['input_dimensions']:
                                             self.last_output]

                    teacher_i = []
                    # account for i = 0
                    teacher_i.append(None)
                    teacher_i.extend(output_pattern)
                    validation_error = mlp_functions.update_ms_error(
                        self.neurons_l, validation_error, teacher_i,
                        outputs_l_j)

                # calculate rms validation error
                validation_error = mlp_functions.calculate_rms_error(
                    self.params['output_function'],
                    validation_error,
                    self.neurons_l[-1],
                    len(self.validation_patterns)
                )

                # make sure validation error is dropping
                if validation_error < validation_error_best:
                    validation_error_best = validation_error
                    best_weights_l_i_j = list(self.weights_l_i_j)
                    epoch_best = epoch

                validation_errors.append(validation_error)

            # record when target training error was reached
            if not target_training_error_reached:
                epoch_target_training_error = epoch
                if training_error < self.target_training_error:
                    target_training_error_reached = True

            # network training halting conditions
            if self.params['stop_at_target_training_error']:
                if (training_error < self.target_training_error or
                        epoch == self.params['max_epochs']):
                    repeat = False
            else:
                if epoch == self.params['max_epochs']:
                    repeat = False

            # finally, increment the epoch
            epoch += 1

        # reverse effects of standardiser on error when we only have a single output
        # only modifies error with numeric standardised outputs and scaled outputs
        if self.params['output_dimensions'] == 1:
            if data_processing.is_scale_type(self.variable_types[-1]):
                training_destandardiser_error = data_processing.Destandardiser(
                    [[item] for item in training_errors],
                    [self.variable_types[-1]])
                training_destandardiser_error.destandardise_by_type()
                training_errors = [
                    item[0] for item in training_destandardiser_error.patterns_out]
                if self.params['validating']:
                    validation_destandardiser_error = data_processing.Destandardiser(
                        [[item] for item in validation_errors],
                        [self.variable_types[-1]])
                    validation_destandardiser_error.destandardise_by_type()
                    validation_errors = [
                        item[0] for item in validation_destandardiser_error.patterns_out]
            elif self.variable_types[-1] == 'numeric':
                training_destandardiser_error = data_processing.Destandardiser(
                    [[item] for item in training_errors],
                    [self.variable_types[-1]],
                    variables_mean=[0],
                    variables_std=[self.training_standardiser.variables_std[-1]])
                training_destandardiser_error.destandardise_by_type()
                training_errors = [
                    item[0] for item in training_destandardiser_error.patterns_out]
                if self.params['validating']:
                    validation_destandardiser_error = data_processing.Destandardiser(
                        [[item] for item in validation_errors],
                        [self.variable_types[-1]],
                        variables_mean=[0],
                        variables_std=[self.training_standardiser.variables_std[-1]])
                    validation_destandardiser_error.destandardise_by_type()
                    validation_errors = [
                        item[0] for item in validation_destandardiser_error.patterns_out]

        # data for summary results
        self.training_errors = training_errors
        self.epoch_end = epoch - 1 # subtract one as increment occurs before while loop ends
        self.epoch_target_training_error = epoch_target_training_error
        self.training_error_end = training_errors[-1]

        if self.params['validating']:
            self.best_weights_l_i_j = best_weights_l_i_j
            self.validation_errors = validation_errors
            self.validation_error_end = validation_errors[-1]
            self.training_error_best = training_errors[epoch_best - 1] # epoch indexed from 1
            self.validation_error_best = validation_errors[epoch_best - 1] # epoch indexed from 1
            self.epoch_best = epoch_best

        # write out detailed results if specified
        if self.params['save_detailed']:
            headers = ['epoch', 'training_error']
            if self.params['validating']:
                headers.append('validation_error')
            for epoch_index, training_error in enumerate(training_errors):
                result = []
                if self.params['validating']:
                    result.append(epoch_index + 1) # start epoch count at one
                    result.append(training_error)
                    result.append(validation_errors[epoch_index])
                else:
                    result.append(epoch_index + 1)
                    result.append(training_error)

                io_functions.write_result_row(
                    'results/%s_detailed.csv' % self.results_filename, headers, result)
print("[INFO] loading network...")
model = VGG16(weights="imagenet", include_top=False)

# initialize the HDF5 dataset writer, then store the class label
# names in the dataset
dataset = HDF5DatasetWriter((len(imagePaths), 512 * 7 * 7),
                            args["output"],
                            dataKey="features",
                            bufSize=args["buffer_size"])
dataset.storeClassLabels(le.classes_)

# initialize the progress bar
widgets = [
    "Extracting Features: ",
    progressbar.Percentage(), " ",
    progressbar.Bar(), " ",
    progressbar.ETA()
]
pbar = progressbar.ProgressBar(maxval=len(imagePaths), widgets=widgets).start()

# loop over the images in batches
for i in np.arange(0, len(imagePaths), bs):
    # extract the batch of images and labels, then initialize the
    # list of actual images that will be passed through the network
    # for feature extraction
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []

    # loop over the images and labels in the current batch
    for (j, imagePath) in enumerate(batchPaths):
Exemple #26
0
def train_model(model, encoder_frnn, encoder_rrnn, decoder_rnn, train_lemmas,
                train_feat_dicts, train_words, dev_lemmas, dev_feat_dicts,
                dev_words, alphabet_index, inverse_alphabet_index, epochs,
                optimization, results_file_path, morph_index,
                train_aligned_pairs, dev_aligned_pairs, feat_index,
                feature_types):
    print 'training...'

    np.random.seed(17)
    random.seed(17)

    if optimization == 'ADAM':
        trainer = AdamTrainer(model,
                              lam=REGULARIZATION,
                              alpha=LEARNING_RATE,
                              beta_1=0.9,
                              beta_2=0.999,
                              eps=1e-8)
    elif optimization == 'MOMENTUM':
        trainer = MomentumSGDTrainer(model)
    elif optimization == 'SGD':
        trainer = SimpleSGDTrainer(model)
    elif optimization == 'ADAGRAD':
        trainer = AdagradTrainer(model)
    elif optimization == 'ADADELTA':
        trainer = AdadeltaTrainer(model)
    else:
        trainer = SimpleSGDTrainer(model)

    total_loss = 0
    best_avg_dev_loss = 999
    best_dev_accuracy = -1
    best_train_accuracy = -1
    patience = 0
    train_len = len(train_words)
    epochs_x = []
    train_loss_y = []
    dev_loss_y = []
    train_accuracy_y = []
    dev_accuracy_y = []

    # progress bar init
    widgets = [progressbar.Bar('>'), ' ', progressbar.ETA()]
    train_progress_bar = progressbar.ProgressBar(widgets=widgets,
                                                 maxval=epochs).start()
    avg_loss = -1

    for e in xrange(epochs):

        # randomize the training set
        indices = range(train_len)
        random.shuffle(indices)
        train_set = zip(train_lemmas, train_feat_dicts, train_words,
                        train_aligned_pairs)
        train_set = [train_set[i] for i in indices]

        # compute loss for each example and update
        for i, example in enumerate(train_set):
            lemma, feats, word, alignment = example
            loss = one_word_loss(model, encoder_frnn, encoder_rrnn,
                                 decoder_rnn, lemma, feats, word,
                                 alphabet_index, alignment, feat_index,
                                 feature_types)
            loss_value = loss.value()
            total_loss += loss_value
            loss.backward()
            trainer.update()
            if i > 0:
                avg_loss = total_loss / float(i + e * train_len)
            else:
                avg_loss = total_loss

        if EARLY_STOPPING:

            # get train accuracy
            train_predictions = predict_templates(
                model, decoder_rnn, encoder_frnn, encoder_rrnn, alphabet_index,
                inverse_alphabet_index, train_lemmas, train_feat_dicts,
                feat_index, feature_types)
            print 'train:'
            train_accuracy = evaluate_model(train_predictions, train_lemmas,
                                            train_feat_dicts, train_words,
                                            feature_types, False)[1]

            if train_accuracy > best_train_accuracy:
                best_train_accuracy = train_accuracy

            dev_accuracy = 0
            avg_dev_loss = 0

            if len(dev_lemmas) > 0:

                # get dev accuracy
                dev_predictions = predict_templates(model, decoder_rnn,
                                                    encoder_frnn, encoder_rrnn,
                                                    alphabet_index,
                                                    inverse_alphabet_index,
                                                    dev_lemmas, dev_feat_dicts,
                                                    feat_index, feature_types)
                print 'dev:'
                # get dev accuracy
                dev_accuracy = evaluate_model(dev_predictions, dev_lemmas,
                                              dev_feat_dicts, dev_words,
                                              feature_types, False)[1]

                if dev_accuracy > best_dev_accuracy:
                    best_dev_accuracy = dev_accuracy

                    # save best model to disk
                    save_pycnn_model(model, results_file_path, morph_index)
                    print 'saved new best model'
                    patience = 0
                else:
                    patience += 1

                # found "perfect" model
                if dev_accuracy == 1:
                    train_progress_bar.finish()
                    if not PARALLELIZE:
                        plt.cla()
                    return model

                # get dev loss
                total_dev_loss = 0
                for i in xrange(len(dev_lemmas)):
                    total_dev_loss += one_word_loss(
                        model, encoder_frnn, encoder_rrnn, decoder_rnn,
                        dev_lemmas[i], dev_feat_dicts[i], dev_words[i],
                        alphabet_index, dev_aligned_pairs[i], feat_index,
                        feature_types).value()

                avg_dev_loss = total_dev_loss / float(len(dev_lemmas))
                if avg_dev_loss < best_avg_dev_loss:
                    best_avg_dev_loss = avg_dev_loss

                print 'epoch: {0} train loss: {1:.2f} dev loss: {2:.2f} ' \
                      'dev accuracy: {3:.2f} train accuracy: {4:.2f} ' \
                      'best dev accuracy: {5:.2f} ' \
                      'best train accuracy: {6:.2f} patience: {7}'.format(
                          e, avg_loss, avg_dev_loss, dev_accuracy,
                          train_accuracy, best_dev_accuracy,
                          best_train_accuracy, patience)

                if patience == MAX_PATIENCE:
                    print 'out of patience after {0} epochs'.format(e)
                    # TODO: would like to return best model but pycnn has a bug with save and load. Maybe copy via code?
                    # return best_model[0]
                    train_progress_bar.finish()
                    if not PARALLELIZE:
                        plt.cla()
                    return model
            else:

                # if no dev set is present, optimize on the train set
                print 'no dev set for early stopping; running until the ' \
                      'train set is fit perfectly or patience runs out'

                if train_accuracy > best_train_accuracy:
                    best_train_accuracy = train_accuracy

                    # save best model to disk
                    save_pycnn_model(model, results_file_path, morph_index)
                    print 'saved new best model'
                    patience = 0
                else:
                    patience += 1

                print 'epoch: {0} train loss: {1:.2f} ' \
                      'train accuracy: {2:.2f} ' \
                      'best train accuracy: {3:.2f} patience: {4}'.format(
                          e, avg_loss, train_accuracy, best_train_accuracy,
                          patience)

                # found "perfect" model on train set or patience has reached
                if train_accuracy == 1 or patience == MAX_PATIENCE:
                    train_progress_bar.finish()
                    if not PARALLELIZE:
                        plt.cla()
                    return model

            # update lists for plotting
            train_accuracy_y.append(train_accuracy)
            epochs_x.append(e)
            train_loss_y.append(avg_loss)
            dev_loss_y.append(avg_dev_loss)
            dev_accuracy_y.append(dev_accuracy)

        # finished epoch
        train_progress_bar.update(e)
        if not PARALLELIZE:
            with plt.style.context('fivethirtyeight'):
                p1, = plt.plot(epochs_x, dev_loss_y, label='dev loss')
                p2, = plt.plot(epochs_x, train_loss_y, label='train loss')
                p3, = plt.plot(epochs_x, dev_accuracy_y, label='dev acc.')
                p4, = plt.plot(epochs_x, train_accuracy_y, label='train acc.')
                plt.legend(loc='upper left', handles=[p1, p2, p3, p4])
            plt.savefig(results_file_path + '_' + morph_index + '.png')
    train_progress_bar.finish()
    if not PARALLELIZE:
        plt.cla()
    print 'finished training. average loss: ' + str(avg_loss)
    return model


def main(argv):
    output_file = None
    shp_path = None
    for opt, arg in argv:
        if opt in ("--input-path"):
            shp_path = arg
        elif opt in ("-o", "--output-file"):
            output_file = codecs.open(arg, "w", "utf_8_sig")
        elif opt in ("-h", "--help"):
            printHelp()

    if output_file is None or shp_path is None:
        printHelp()

    shpfile = dbf.Dbf(shp_path + "/Streets.dbf")
    rdms = dbf.Dbf(shp_path + "/Rdms.dbf")
    zlevels = openShapefile(shp_path + "/Zlevels")
    cdms = dbf.Dbf(shp_path + "/Cdms.dbf")

    nodes_file_ = tempfile.NamedTemporaryFile()
    nodes_file = codecs.open(nodes_file_.name, "w+", "utf-8")
    ways_file_ = tempfile.NamedTemporaryFile()
    ways_file = codecs.open(ways_file_.name, "w+", "utf-8")
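    # Each NamedTemporaryFile supplies a path on disk; the codecs wrapper on
    # the same path gives UTF-8 text I/O ("w+": write first, then read back
    # after seek(0)).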

    relations_file = [tempfile.NamedTemporaryFile() for _ in range(5)]

    widgets = [
        'Importing data from NavTeq Shapes: ',
        progressbar.Bar(marker=progressbar.AnimatedMarker()), ' ',
        progressbar.ETA()
    ]
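    # each street record is presumably visited twice (once for nodes, once
    # for ways), plus one progress tick per z-level record: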
    maxval = 2 * len(shpfile) + zlevels.numRecords
    progress = progressbar.ProgressBar(widgets=widgets, maxval=maxval).start()
    progress.update(0)

    process(zlevels, rdms, cdms, shpfile, nodes_file, ways_file, progress,
            relations_file)

    if progress is not None:
        progress.finish()

    # Free memory: drop references so large objects can be collected.
    shpfile = None
    progress = None
    rdms = None
    n = None
    via = None
    record = None
    restriction_id = None
    restriction_type = None
    way_from = None
    way_to = None

    global numlines
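    # `numlines` is assumed to be a module-level count of lines written to
    # the temp files by process(); the +10 leaves slack for the XML wrapper.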
    progress = progressbar.ProgressBar(
        widgets=[
            'Writing data to file .osm, please wait: ',
            progressbar.Percentage(),
            progressbar.Bar(marker=progressbar.AnimatedMarker()), ' ',
            progressbar.ETA()
        ],
        maxval=numlines + 10).start()

    progress.update(0)
    output_file.write("<?xml version='1.0' encoding='UTF-8'?>")
    output_file.write('\n')
    output_file.write(" <osm version='0.6' generator='navteq2osm'>")
    output_file.write('\n')
    output_file.flush()

    def copy_lines(src, dst, progress):
        # Stream a temp file into the output in small batches, ticking the
        # progress bar per line and flushing the output now and then.
        src.flush()
        src.seek(0)
        while True:
            lines = src.readlines(200)
            if not lines:
                break
            for line in lines:
                dst.write(line)
                try:
                    progress.update(progress.currval + 1)
                except Exception:
                    pass  # the maxval estimate may be exceeded
                if random.random() > 0.8:
                    dst.flush()

    copy_lines(nodes_file, output_file, progress)
    nodes_file.close()
    nodes_file_.close()

    copy_lines(ways_file, output_file, progress)
    ways_file.close()
    ways_file_.close()

    for rfile in relations_file:
        copy_lines(rfile, output_file, progress)
        rfile.close()

    output_file.write(' </osm>')
    output_file.write('\n')
    output_file.flush()
    output_file.close()
    progress.finish()
Exemple #28
def embed(num_epoch, coauthor_graph, author_work_graph, work_graph,
          bpr_optimizer, pp_sampler, pd_sampler, dd_sampler,
          ground_truth_dict, evaluation, sampler_method='uniform'):

    num_nnz = work_graph.number_of_edges() + \
            coauthor_graph.number_of_edges() + \
            author_work_graph.number_of_edges()
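    # one "epoch" below takes as many sampling steps as there are edges
    # across the three graphs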
    bpr_optimizer.init_model(coauthor_graph, work_graph)

    if sampler_method == 'uniform':
        for epoch in range(num_epoch):
            print('Epoch: {}'.format(epoch))
            bar = progressbar.ProgressBar(
                maxval=num_nnz,
                widgets=[progressbar.Bar('=', '[', ']'), ' ',
                         progressbar.Percentage()])
            bar.start()

            bpr_loss = 0.0
            for bu in range(num_nnz):
                """
                update embedding in person-person network
                update embedding in person-document network
                update embedding in doc-doc network
                """
                bar.update(bu)
                for i, j, t in pp_sampler.generate_triplet_uniform(coauthor_graph):
                    bpr_optimizer.update_pp_gradient(i, j, t)
                    bpr_loss += bpr_optimizer.compute_pp_loss(i, j, t)

                for i, j, t in pd_sampler.generate_triplet_uniform(author_work_graph, coauthor_graph, work_graph):
                    bpr_optimizer.update_pd_gradient(i, j, t)
                    bpr_loss += bpr_optimizer.compute_pd_loss(i, j, t)

                for i, j, t in dd_sampler.generate_triplet_uniform(work_graph):
                    bpr_optimizer.update_dd_gradient(i, j, t)
                    bpr_loss += bpr_optimizer.compute_dd_loss(i, j, t)
            bar.finish()
            average_loss = float(bpr_loss) / num_nnz
            print('\n Average BPR loss: {}'.format(average_loss))
            average_f1 = evaluation(ground_truth_dict, bpr_optimizer)
            print('F1: {} \n'.format(average_f1))


    elif sampler_method == 'reject':
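        # NOTE: `dataset` in this branch (and the adaptive one) is not
        # defined in this function or its signature; it is assumed to come
        # from module scope in the original code.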
        for _ in range(num_epoch):
            #bpr_loss = 0.0
            for _ in xrange(num_nnz):
                # update embeddings in the person-person, person-document,
                # and doc-doc networks
                for i, j, t in pp_sampler.generate_triplet_reject(dataset, bpr_optimizer):
                    bpr_optimizer.update_pp_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_pp_loss(i, j, t)

                for i, j, t in pd_sampler.generate_triplet_reject(dataset, bpr_optimizer):
                    bpr_optimizer.update_pd_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_pd_loss(i, j, t)

                for i, j, t in dd_sampler.generate_triplet_reject(dataset, bpr_optimizer):
                    bpr_optimizer.update_dd_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_dd_loss(i, j, t)

    elif sampler_method == 'adaptive':
        for _ in xrange(num_epoch):
            #bpr_loss = 0.0
            for _ in xrange(num_nnz):
                # update embeddings in the person-person, person-document,
                # and doc-doc networks
                for i, j, t in pp_sampler.generate_triplet_adaptive(dataset, bpr_optimizer):
                    bpr_optimizer.update_pp_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_pp_loss(i, j, t)

                for i, j, t in pd_sampler.generate_triplet_adaptive(dataset, bpr_optimizer):
                    bpr_optimizer.update_pd_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_pd_loss(i, j, t)

                for i, j, t in dd_sampler.generate_triplet_adaptive(dataset, bpr_optimizer):
                    bpr_optimizer.update_dd_gradient(i, j, t)
                    #bpr_loss += bpr_optimizer.compute_dd_loss(i, j, t)

from collections import OrderedDict
from sklearn.cluster import DBSCAN
import numpy as np
import scipy.linalg as sl
import matplotlib.pyplot as plt
from matplotlib.path import Path
from shapely.geometry import LineString
import json
import os.path
from subprocess import call

# needed by the code below; missing from the original snippet
from panoptes_client import Panoptes
import progressbar as pb

Panoptes.connect()
widgets = [
    'Aggregate: ',
    pb.Percentage(), ' ',
    pb.Bar(marker='0', left='[', right=']'), ' ',
    pb.ETA()
]

# metadata on each subject is in this format
metadata_dtype = [
    ('ra', '>f4'), ('dec', '>f4'), ('MANGAID', 'S11'), ('IAUNAME', 'S19'),
    ('IFUDESIGNSIZE', '>f8'), ('#MANGA_TILEID', '>f8'), ('NSAID', '>i8'),
    ('explorer_link', 'S90'), ('GZ2_total_classifications', '>i2'),
    ('GZ2_bar_votes', '>i2'), ('GZ2_spiral_votes', '>i2'),
    ('specobjid', '>i8'), ('dr7objid', '>i8'), ('dr8objid', '>i8'),
    ('gz2_sample', 'S70')
]


def define_wcs(ra, dec, scale=0.099, size_pix=np.array([525, 525])):
Exemple #30
def main():
    seeding()
    parallel_envs = 4
    number_of_episodes = 1000
    episode_length = 80
    batchsize = 1000
    save_interval = 1000
    t = 0

    # amplitude of OU noise, which slowly decreases to 0
    noise = 2
    noise_reduction = 0.9999

    # how many episodes before update
    episode_per_update = 2 * parallel_envs

    log_path = os.getcwd() + "/log"
    model_dir = os.getcwd() + "/model_dir"

    os.makedirs(model_dir, exist_ok=True)

    torch.set_num_threads(parallel_envs)
    """
    `env` controls three agents, two blue, one red.
    env.observation_space: [Box(14,), Box(14,), Box(14,)]
    env.action_sapce: [Box(2,), Box(2,), Box(2,)]
    Box(14,) can be broken down into 2+3*2+3*2=14
    (2) location coordinates of the target landmark
    (3*2) the three agents' positions w.r.t. the target landmark
    (3*2) the three agents' velocities w.r.t. the target landmark
    """
    env = envs.make_parallel_env(parallel_envs)

    # keep 5000 episodes worth of replay
    buffer = ReplayBuffer(int(5000 * episode_length))
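    # capacity in transitions: 5000 episodes * 80 steps = 400000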

    # initialize policy and critic
    maddpg = MADDPG()
    logger = SummaryWriter(log_dir=log_path)
    agent0_reward = []
    agent1_reward = []
    agent2_reward = []

    # training loop
    # show progressbar
    import progressbar as pb
    widget = [
        'episode: ',
        pb.Counter(), '/',
        str(number_of_episodes), ' ',
        pb.Percentage(), ' ',
        pb.ETA(), ' ',
        pb.Bar(marker=pb.RotatingMarker()), ' '
    ]

    timer = pb.ProgressBar(widgets=widget, maxval=number_of_episodes).start()

    # use keep_awake to keep workspace from disconnecting
    for episode in keep_awake(range(0, number_of_episodes, parallel_envs)):

        timer.update(episode)

        reward_this_episode = np.zeros((parallel_envs, 3))
        # Consult `env_wrapper.py` line 19.
        all_obs = env.reset()
        """
        `all_abs` is a list of size `parallel_envs`,
        each item in the list is another list of size two,
        first is env.observation_space: [Box(14,), Box(14,), Box(14,)],
        second is [Box(14,)], which is added to faciliate training
        https://goo.gl/Xtr6sF
        `obs` and `obs_full` are both lists of size `parallel_envs`,
        `obs` has the default observation space [Box(14,), Box(14,), Box(14,)]
        `obs_full` has the compounded observation space [Box(14,)]
        """
        obs, obs_full = transpose_list(all_obs)

        # for calculating rewards for one episode - addition of all time steps

        # save info or not
        save_info = (episode % save_interval < parallel_envs
                     or episode == number_of_episodes - parallel_envs)
        frames = []
        tmax = 0

        if save_info:
            frames.append(env.render('rgb_array'))

        for episode_t in range(episode_length):

            t += parallel_envs

            # act with exploration noise; `noise` decays every step below
            # action input needs to be transposed
            actions = maddpg.act(transpose_to_tensor(obs), noise=noise)
            noise *= noise_reduction

            # `actions_array` has shape (3, parallel_envs, 2)
            actions_array = torch.stack(actions).detach().numpy()
            # `actions_for_env` has shape (parallel_envs, 3, 2), because
            # input to `step` requires the first index to be `parallel_envs`
            actions_for_env = np.rollaxis(actions_array, axis=1)

            # step forward one frame
            next_obs, next_obs_full, rewards, dones, info = \
                env.step(actions_for_env)

            # add data to buffer
            transition = (obs, obs_full, actions_for_env, rewards, next_obs,
                          next_obs_full, dones)

            buffer.push(transition)

            reward_this_episode += rewards

            obs, obs_full = next_obs, next_obs_full

            # save gif frame
            if save_info:
                frames.append(env.render('rgb_array'))
                tmax += 1

        # update the networks once `episode_per_update` (= 2 * parallel_envs)
        # episodes have accumulated; `episode` advances in steps of
        # `parallel_envs`
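        # e.g., with parallel_envs = 4: episode takes values 0, 4, 8, ...,
        # so episode % 8 < 4 holds on every other iteration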
        if len(buffer) > batchsize and \
                episode % episode_per_update < parallel_envs:
            # update the local network for all agents, `a_i` refers to agent no.
            for a_i in range(3):
                samples = buffer.sample(batchsize)
                maddpg.update(samples, a_i, logger)
            # soft update the target network towards the actual networks
            maddpg.update_targets()

        for i in range(parallel_envs):
            agent0_reward.append(reward_this_episode[i, 0])
            agent1_reward.append(reward_this_episode[i, 1])
            agent2_reward.append(reward_this_episode[i, 2])

        if episode % 100 == 0 or episode == number_of_episodes - 1:
            avg_rewards = [
                np.mean(agent0_reward),
                np.mean(agent1_reward),
                np.mean(agent2_reward)
            ]
            agent0_reward = []
            agent1_reward = []
            agent2_reward = []
            for a_i, avg_rew in enumerate(avg_rewards):
                logger.add_scalar('agent%i/mean_episode_rewards' % a_i,
                                  avg_rew, episode)

        # save the model
        save_dict_list = []
        if save_info:
            for i in range(3):
                save_dict = {
                    'actor_params':
                    maddpg.maddpg_agent[i].actor.state_dict(),
                    'actor_optim_params':
                    maddpg.maddpg_agent[i].actor_optimizer.state_dict(),
                    'critic_params':
                    maddpg.maddpg_agent[i].critic.state_dict(),
                    'critic_optim_params':
                    maddpg.maddpg_agent[i].critic_optimizer.state_dict()
                }
                save_dict_list.append(save_dict)

            # write once, after all three agents' states are collected
            torch.save(
                save_dict_list,
                os.path.join(model_dir, 'episode-{}.pt'.format(episode)))

            # Save gif files.
            imageio.mimsave(os.path.join(model_dir,
                                         'episode-{}.gif'.format(episode)),
                            frames,
                            duration=.04)

    env.close()
    logger.close()
    timer.finish()