Example #1
def test_sample_vehicle():
    c = opensees_default(bridge_705(0.5))
    c.vehicle_density = [(11.5, 0.7), (12.2, 0.2), (43, 0.1)]

    # Test a vehicle is returned.
    vehicle = sample_vehicle(c)
    print_d(D, vehicle)
    assert isinstance(vehicle, Vehicle)

    # Test noise is added.
    _, vehicle = sample_vehicle(c=c, pd_row=True)
    true_vehicle = c.vehicle_data.loc[vehicle.index]
    for col_name in noise_col_names:
        # The DataFrame has only one row, but .all() is still required.
        assert (vehicle.loc[vehicle.index, col_name] !=
                true_vehicle.loc[vehicle.index, col_name]).all()

    # Test noise is not added.
    c.perturb_stddev = 0
    _, vehicle = sample_vehicle(c=c, pd_row=True)
    true_vehicle = c.vehicle_data.loc[vehicle.index]
    for col_name in noise_col_names:
        # The DataFrame has only one row, but .all() is still required.
        assert (vehicle.loc[vehicle.index, col_name] ==
                true_vehicle.loc[vehicle.index, col_name]).all()
Example #2
def opensees_to_stress_strain(
    path: str, parse_stress: bool, parse_strain: bool
) -> Tuple[Optional[List[SSTuple]], Optional[List[SSTuple]]]:
    """Return a tuple of stress and/or strain fem.

    For both stress and strain the value is None if the respective argument
    parse_stress or parse_strain is None. Otherwise the value in each case is a
    list of tuples (V, T, I), where V is the value of the response, T is the
    time of the simulation and I is the index of the measurement at that time.

    NOTE: This return type should really be a matrix, but is like this for a
    legacy reason, feel free to change/update it.

    """
    print_d(D, f"path = {path}")
    stress_strain = opensees_to_numpy(path)
    num_time = len(stress_strain)
    num_measurements = len(stress_strain[0]) // 2
    stress, strain = None, None
    if parse_stress:
        stress = [(stress_strain[time][i * 2], time, i)
                  for i in range(num_measurements) for time in range(num_time)]
    if parse_strain:
        strain = [(stress_strain[time][i * 2 + 1], time, i)
                  for i in range(num_measurements) for time in range(num_time)]
    return stress, strain
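The layout assumed by the two comprehensions above is one row per time step with stress and strain columns interleaved per measurement point. A minimal, self-contained sketch of that indexing on toy data (plain lists, not the project's parser output):

# Hypothetical parsed output: 3 time steps, 2 measurement points, so each row
# holds (stress0, strain0, stress1, strain1).
stress_strain = [
    [1.0, 0.1, 2.0, 0.2],
    [3.0, 0.3, 4.0, 0.4],
    [5.0, 0.5, 6.0, 0.6],
]
num_time = len(stress_strain)
num_measurements = len(stress_strain[0]) // 2
# Even columns are stress, odd columns are strain.
stress = [(stress_strain[t][i * 2], t, i)
          for i in range(num_measurements) for t in range(num_time)]
strain = [(stress_strain[t][i * 2 + 1], t, i)
          for i in range(num_measurements) for t in range(num_time)]
assert stress[0] == (1.0, 0, 0) and strain[-1] == (0.6, 2, 1)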
Example #3
    def response_to(
        self,
        x_frac: float,
        z_frac: float,
        load_x_frac: float,
        load: float,
        y_frac: float = 1,
        time_index: int = 0,
    ):
        """The response value in kN at a position to a load at a position.

        NOTE: only the loading position in longitudinal direction can be chosen,
        with 'load_x_frac', the position in transverse direction is fixed for a
        single ILExpt.

        Args:
            x_frac: float, response position on x-axis in [0 1].
            y_frac: float, response position on y-axis in [0 1].
            z_frac: float, response position on z-axis in [0 1].
            load_x_frac: float, load position on x-axis in [0 1].
            load: float, value of the load in kN.
            time_index: int, time index of the simulation.

        """
        assert 0 <= x_frac <= 1
        assert 0 <= load_x_frac <= 1
        print_d(D, f"x_frac = {x_frac} = load_x_frac = {load_x_frac}")
        response = super().sim_response(
            expt_frac=load_x_frac,
            x_frac=x_frac,
            y_frac=y_frac,
            z_frac=z_frac,
            time_index=time_index,
        )
        return response * (load / self.c.il_unit_load_kn)
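The last line relies on linearity of the influence line: stored responses are for a unit load of 'c.il_unit_load_kn', so the response to any load is a simple rescaling. A standalone sketch of that scaling (the 100 kN unit load below is an illustrative value, not the project's configuration):

import math

def scale_unit_response(unit_response: float, load_kn: float,
                        unit_load_kn: float = 100.0) -> float:
    """Rescale a response recorded under a unit load to the given load."""
    return unit_response * (load_kn / unit_load_kn)

# A 250 kN load produces 2.5x the response recorded under the 100 kN unit load.
assert math.isclose(scale_unit_response(0.002, load_kn=250), 0.005)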
Example #4
def convert_responses(
    c: Config, expt_params: List[SimParams], parsed_expt_responses: Parsed
) -> Dict[int, Dict[ResponseType, List["Response"]]]:
    """Parse fem from an OpenSees simulation."""
    print_d(D, f"Converting {c.bridge.dimensions} bridge fem")
    return convert_responses_3d(
        c=c, expt_params=expt_params, parsed_expt_responses=parsed_expt_responses,
    )
Example #5
def _vehicle_pdf_groups(vehicle_data: VehicleData, col: str,
                        lengths: List[int]):
    """Vehicle data grouped by a maximum value per group."""
    print_d(D, f"Vehicle PDF column is {repr(col)}")
    print_d(D, str(lengths))
    assert sorted(lengths) == lengths
    # TODO: Better vehicle data format; lengths should be in meters.
    if col == "length":
        lengths = [length * 100 for length in lengths]

    def group_by(x):
        length = vehicle_data.loc[x, col]
        for i, l in enumerate(lengths):
            if length < l:
                return i

    return vehicle_data.groupby(by=group_by)
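A self-contained sketch of the same binning with pandas: the callable passed to 'groupby' receives each index label and returns the index of the first group maximum that the row's length falls under (toy data, not the project's vehicle database):

import pandas as pd

data = pd.DataFrame({"length": [450, 900, 1300, 700]})  # lengths in cm
lengths = [500, 1000, 1500]  # sorted group maximums

def group_by(index):
    value = data.loc[index, "length"]
    for i, maximum in enumerate(lengths):
        if value < maximum:
            return i

groups = data.groupby(by=group_by)
print(groups.size())  # group 0 has 1 row, group 1 has 2 rows, group 2 has 1 row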
Example #6
    def plot_bridge_response(t):
        update_vehicles(t)
        plt.ylim(top=top, bottom=bottom)

        # Plot responses for each moving vehicle.
        for i in range(len(mv_vehicles)):
            t_vehicle_responses = responses[i][t]
            x_axis = c.bridge.x_axis_equi(len(t_vehicle_responses))

            # Plot responses per axle and one summed response.
            if per_axle:
                for axle in range(mv_vehicles[i].num_axles):
                    print_d(D, f"axle_num = {axle}")
                    plt.plot(
                        x_axis,
                        list(map(lambda x: x[axle], t_vehicle_responses)),
                        color=response_axle_color,
                        linewidth=1,
                    )
                print_d(D, f"Response per axle")
                plt.plot(
                    x_axis,
                    responses_per_vehicle[i][t],
                    color=response_color,
                    linewidth=1,
                )

            # Plot a single response for this moving vehicle.
            else:
                plt.plot(x_axis, t_vehicle_responses)

        # Plot the bridge and vehicles.
        plot_bridge_deck_side(
            c.bridge,
            vehicles=vehicles,
            equal_axis=False,
            normalize_vehicle_height=True,
        )
        sci_format_y_axis()
        response_name = response_type.name().capitalize()
        plt.title(f"{response_name} at {t * c.time_step:.1f}s")
        plt.xlabel("x-axis (m)")
        plt.ylabel(f"{response_name} ({response_type.units()})")
        plt.gcf().set_size_inches(16, 10)
Example #7
def loads_to_traffic_array(c: Config, loads: List[List[PointLoad]]):
    """Convert a list of loads per timestep to a 'TrafficArray'."""
    times = len(loads)
    wheel_track_zs = c.bridge.wheel_track_zs(c)
    num_load_positions = c.il_num_loads * len(wheel_track_zs)
    traffic_array = np.zeros((times, num_load_positions))
    wheel_track_index_f = x_to_wheel_track_index(c)
    for time, time_loads in enumerate(loads):
        # For each load, find the wheel track it's on, then fill in the traffic array (ULM).
        for load in time_loads:
            wheel_track_found = False
            for w, wheel_track_z in enumerate(wheel_track_zs):
                if not wheel_track_found and np.isclose(wheel_track_z, load.z):
                    wheel_track_found = True
                    print_d(D, f"load z = {load.z}")
                    print_d(D, f"load x = {load.x}")
                    x_ind = wheel_track_index_f(load.x)
                    j = (w * c.il_num_loads) + x_ind
                    print_d(D, f"x_ind = {x_ind}")
                    print_d(D, f"j = {j}")
                    traffic_array[time][j] += load.load
            if not wheel_track_found:
                raise ValueError(f"No wheel track for point load at z = {load.z}")
    return traffic_array
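The column index computed above follows a simple layout: each wheel track owns a contiguous block of 'c.il_num_loads' columns, so a load's column is its track index times the block size plus its longitudinal index. A reduced sketch of that layout (toy sizes, not the project's configuration):

import numpy as np

il_num_loads = 4              # longitudinal load positions per wheel track
wheel_track_zs = [-1.0, 1.0]  # two wheel tracks
traffic_array = np.zeros((1, il_num_loads * len(wheel_track_zs)))

def column(track_index: int, x_index: int) -> int:
    """Column in the traffic array for a load on a given track and position."""
    return track_index * il_num_loads + x_index

traffic_array[0][column(1, 2)] += 50.0  # 50 kN on track 1, longitudinal index 2
print(traffic_array)  # only column 6 is non-zero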
Example #8
def sample_vehicle(
    c: Config,
    group_index: int = None,
    noise_col_names: List[str] = [],
    pd_row: bool = False,
) -> Union[Vehicle, Tuple[Vehicle, pd.DataFrame]]:
    """Sample a vehicles from a c.vehicle_density group.

    Args:
        c: Config, config from which to load vehicle data and density info.
        group_index: int, sample from a given group index, or all groups (None).
        noise_col_names: List[str], a list of columns to apply noise to.
        pd_row: bool, if true return a tuple of Vehicle and the corresponding
            row from the Pandas DataFrame, else return just a Vehicle.

    """
    # Select a vehicle group randomly, if no group is specified.
    if group_index is None:
        rand = np.random.uniform()
        # print(rand)
        # print_d(D, f"Vehicle PDF = {c.vehicle_pdf}")
        # Groups are tuples of (group maximum, fraction of all groups).
        group_min, group_max = 0, c.vehicle_pdf[-1][0]
        print_d(D, f"rand = {rand}")
        print_d(D, f"min = {group_min}, max = {group_max}")
        running_fraction = 0
        # Iterate through group fractions until reaching the randomly selected one.
        # print(c.vehicle_pdf)
        for i, (_, group_fraction) in enumerate(c.vehicle_pdf):
            running_fraction += group_fraction
            # print(f"running fraction = {running_fraction}")
            # print(f"i = {i}, running_fraction = {running_fraction}")
            if rand < running_fraction:
                group_index = i
                break
    # print(D, f"group_index = {group_index}")

    # Sample a vehicle uniformly at random from the group.
    groups_dict = {i: None for i in range(len(c.vehicle_pdf))}
    print_d(D, groups_dict.items())
    for i, (_, group) in enumerate(vehicle_pdf_groups(c)):
        # print(D, f"i = {i}")
        groups_dict[i] = group
    group = groups_dict[group_index]

    # print(f"group = {type(group)}")
    if group is None:
        print_w("Sampled group is None, resampling...")
        # Resample without forcing a group index, otherwise the same empty
        # group would be selected again; preserve the remaining arguments.
        return sample_vehicle(c=c, noise_col_names=noise_col_names, pd_row=pd_row)
    sample = c.vehicle_data.loc[group.sample().index]

    # Add noise to the sample if requested.
    if c.perturb_stddev:
        # print(f"perturb")
        for col_name, (_, stddev) in zip(noise_col_names,
                                         noise_per_column(c, noise_col_names)):
            print_d(
                D,
                f"col_name = {col_name}, stddev = {stddev:.2f}" +
                f",{c.perturb_stddev} x stddev" +
                f" {c.perturb_stddev * stddev:.2f}",
            )
            noise = np.random.normal(loc=0, scale=c.perturb_stddev * stddev)
            print_d(D, f"before =\n{sample[col_name]},\nnoise = {noise}")
            sample[col_name] = sample[col_name] + noise
            print_d(D, f"after =\n{sample[col_name]}")

    # Convert sample to Vehicle and return it.
    row = sample.iloc[0]
    axle_distances = axle_array_and_count(row["axle_distance"])
    axle_weights = axle_array_and_count(row["weight_per_axle"])
    # TODO: Fix units in database.
    # print(axle_distances)
    # print(axle_weights)
    # print(row["total_weight"])
    vehicle = Vehicle(
        kmph=40,
        kn=axle_weights,
        axle_width=c.axle_width,
        axle_distances=np.array(axle_distances) / 100,
    )
    return (vehicle, sample) if pd_row else vehicle
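The group selection above is a discrete inverse-CDF sample: accumulate the group fractions until the running total exceeds a uniform random draw. A standalone sketch of just that step (toy fractions, not the project's vehicle PDF):

import numpy as np

def sample_group_index(fractions) -> int:
    """Return a group index sampled according to the given fractions."""
    rand = np.random.uniform()
    running_fraction = 0.0
    for i, fraction in enumerate(fractions):
        running_fraction += fraction
        if rand < running_fraction:
            return i
    return len(fractions) - 1  # guard against rounding in the final total

# Roughly 70% of draws return 0, 20% return 1 and 10% return 2.
print(sample_group_index([0.7, 0.2, 0.1]))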
Example #9
def apply_effect(
    c: Config,
    points: List[Point],
    responses: List[List[float]],
    effect: List[List[float]],
    speed_up: int = 1,
    repeat_responses: bool = False,
) -> List[float]:
    """Time series of effect due to temperature at given points.

    Returns: a NumPy array of the same shape as the given responses. The
        effect due to temperature is interpolated across the date range of the
        given responses. This is calculated under the assumption that the
        temperature effect is given at one data point per minute and that the
        sensor responses are given at a rate of 'c.sensor_hz'.

    """
    raise ValueError("Deprecated")
    assert len(responses) == len(points)
    # Convert the temperature data into temperature effect at each point.
    # effect_ = effect(c=c, response_type=response_type, points=points, temps=temps)
    assert len(effect) == len(points)
    # A temperature sample is available per minute. Here we calculate the
    # number of responses between each pair of recorded temperatures and the
    # number of temperature samples required for the given responses.
    len_per_min = get_len_per_min(c=c, speed_up=speed_up)
    print_i(f"Length per minute = {len_per_min}, speed_up = {speed_up}")
    num_temps_req = math.ceil(len(responses[0]) / len_per_min) + 1
    if num_temps_req > len(effect[0]):
        raise ValueError(f"Not enough temperatures ({len(effect[0])}) for data"
                         f" (requires {num_temps_req})")
    # If additional temperature data is available, then use it if requested and
    # repeat the given responses. Here we calculate the available length, in
    # terms of the sample frequency; recall that temperature is sampled every
    # minute.
    avail_len = (len(effect[0]) - 1) * len_per_min
    if repeat_responses and (avail_len > len(responses[0])):
        print_i(
            f"Increasing length of fem from {len(responses[0])} to {avail_len}"
        )
        num_temps_req = len(effect[0])
        new_responses = np.empty((len(responses), avail_len))
        for i in range(len(responses)):
            for j in range(math.ceil(avail_len / len(responses[0]))):
                start = j * len(responses[0])
                end = min(avail_len - 1, start + len(responses[0]))
                new_responses[i][start:end] = responses[i][:end - start]
        responses = new_responses
    # Fill in the result array with the temperature effect.
    result = np.zeros((len(points), len(responses[0])))
    for i in range(len(points)):
        for j in range(num_temps_req - 1):
            start = j * len_per_min
            end = min(len(result[i]), start + len_per_min)
            print_d(D, f"start = {start}")
            print_d(D, f"end = {end}")
            print_d(D, f"end - start = {end - start}")
            # print_d(D, f"temp_start, temp_end = {temps[j]}, {temps[j + 1]}")
            print_d(
                D,
                f"effect_start, effect_end = {effect[i][j]}, {effect[i][j + 1]}"
            )
            result[i][start:end] = np.linspace(effect[i][j], effect[i][j + 1],
                                               end - start)
    if repeat_responses:
        return responses, result
    return result
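The filling loop above upsamples per-minute temperature effects to the response rate by linear interpolation between consecutive minutes. A minimal sketch of that step with 'np.linspace' (toy numbers; 'len_per_min' stands in for the value derived from the config):

import numpy as np

effect_per_min = [0.0, 2.0, 1.0]  # temperature effect, one value per minute
len_per_min = 4                   # response samples per minute
result = np.zeros((len(effect_per_min) - 1) * len_per_min)
for j in range(len(effect_per_min) - 1):
    start = j * len_per_min
    end = start + len_per_min
    # Interpolate linearly between consecutive per-minute effects.
    result[start:end] = np.linspace(effect_per_min[j], effect_per_min[j + 1],
                                    end - start)
print(result)  # 8 interpolated samples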
Example #10
def effect(
    c: Config,
    response_type: ResponseType,
    points: List[Point],
    temps_bt: Optional[Tuple[List[float], List[float]]] = None,
    len_per_hour: Optional[int] = None,
    temps: Optional[List[float]] = None,
    solar: Optional[List[float]] = None,
    d: bool = False,
    ret_temps_bt: bool = False,
) -> List[List[float]]:
    """Temperature effect at given points for a number of given temperatures.

    The result is of shape (number of points, number of temperatures).

    NOTE: The 'ThermalScenario' method 'to_strain', which is called by this
        function, multiplies the results by 1E-6. So take note that the strain
        values are already converted from microstrain to strain and do not need
        to be rescaled.

    Args:
        c: Config, global configuration object.
        response_type: ResponseType, type of sensor response to temp. effect.
        points: List[Point], points at which to calculate temperature effect.
        temps_bt: A 2-tuple of arrays, the first array is for the temperatures
            at the bottom of the bridge, and the second array is for the
            temperatures at the top of the bridge. If this argument is given
            then 'temps', 'solar', 'len_per_hour' must not be given.
        len_per_hour: Optional[int], if given then temps and solar must also be
            given. The temperature responses are interpolated such that there
            are 'len_per_hour' responses for every hour of temperature data. It
            is assumed the temperature data is one data point per minute.
        temps: Optional[List[float]], first see 'len_per_hour'. Air temperature
            data given at one data point per minute.
        solar: Optional[List[float]], first see 'len_per_hour'. Solar irradiance
            data given at one data point per minute, same as 'temps'.
        d: bool, if True also return the uniform and linear temperature
            components along with the combined effect.
        ret_temps_bt: bool, if True also return the bottom and top temperatures.

    """
    if temps_bt is not None:
        if any(x is not None for x in [len_per_hour, temps, solar]):
            raise ValueError(
                "Must only pass 'temps_bt', or ('len_per_hour', 'temps' & 'solar')"
            )

    original_c = c
    # Unit effect from uniform temperature loading.
    unit_uniform = ThermalScenario(axial_delta_temp=c.unit_axial_delta_temp_c)
    c, sim_params = unit_uniform.use(original_c)
    uniform_responses = load_fem_responses(
        c=c,
        sim_runner=OSRunner,
        response_type=response_type,
        sim_params=sim_params,
    )
    # Unit effect from linear temperature loading.
    unit_linear = ThermalScenario(moment_delta_temp=c.unit_moment_delta_temp_c)
    c, sim_params = unit_linear.use(original_c)
    linear_responses = load_fem_responses(
        c=c,
        sim_runner=OSRunner,
        response_type=response_type,
        sim_params=sim_params,
    )
    print_i("Loaded unit uniform and linear temperature fem")

    # Convert uniform responses to the correct type (thermal post-processing).
    if response_type in [
            ResponseType.Strain,
            ResponseType.StrainT,
            ResponseType.StrainZZB,
    ]:
        uniform_responses = unit_uniform.to_strain(
            c=c, sim_responses=uniform_responses)
    elif response_type == ResponseType.Stress:
        uniform_responses = unit_uniform.to_stress(
            c=c, sim_responses=uniform_responses)
    unit_uniforms = np.array(uniform_responses.at_decks(points))
    print(f"Unit uniform temperature per point, shape = {unit_uniforms.shape}")

    # Convert linear responses to the correct type (thermal post-processing).
    if response_type in [
            ResponseType.Strain,
            ResponseType.StrainT,
            ResponseType.StrainZZB,
    ]:
        linear_responses = unit_linear.to_strain(
            c=c, sim_responses=linear_responses)
    elif response_type == ResponseType.Stress:
        linear_responses = unit_linear.to_stress(
            c=c, sim_responses=linear_responses)
    unit_linears = np.array(linear_responses.at_decks(points))

    # Determine temperature gradient throughout the bridge.
    if temps_bt is None:
        temps_bottom, temps_top = temps_bottom_top(c=c,
                                                   temps=temps,
                                                   solar=solar,
                                                   len_per_hour=len_per_hour)
    else:
        temps_bottom, temps_top = temps_bt
        temps_bottom, temps_top = np.array(temps_bottom), np.array(temps_top)

    temps_half = (temps_bottom + temps_top) / 2
    temps_linear = temps_top - temps_bottom
    temps_uniform = temps_half - c.bridge.ref_temp_c

    # print(f"temps_bottom.shape = {temps_bottom.shape}")
    # print(f"temps_top.shape = {temps_top.shape}")
    # print(f"temps_half.shape = {temps_half.shape}")
    print_d(D, f"tb = {temps_bottom[:3]}")
    print_d(D, f"tt = {temps_top[:3]}")
    print_d(D, f"th = {temps_half[:3]}")
    print_d(D, f"temps linear = {temps_linear[:3]}")
    print_d(D, f"temps uniform = {temps_uniform[:3]}")

    # Combine uniform and linear responses.
    uniform_responses = np.array(
        [unit_uniform * temps_half for unit_uniform in unit_uniforms])
    linear_responses = np.array(
        [unit_linear * temps_linear for unit_linear in unit_linears])
    # print(f"uniform_responses.shape = {uniform_responses.shape}")
    # print(f"linear_responses.shape = {linear_responses.shape}")
    print_d(D, f"uniform fem = {uniform_responses[:3]}")
    print_d(D, f"linear fem = {linear_responses[:3]}")
    if d:
        return temps_uniform, temps_linear, uniform_responses + linear_responses
    if ret_temps_bt:
        return ((temps_bottom, temps_top),
                uniform_responses + linear_responses)
    return uniform_responses + linear_responses
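The final combination is a superposition of two unit load cases: a uniform component scaled by the mean temperature above the reference, and a linear component scaled by the top-bottom difference. A standalone sketch of that superposition for a single point (toy unit responses, not simulation output):

import numpy as np

ref_temp_c = 15.0
temps_bottom = np.array([10.0, 12.0, 14.0])
temps_top = np.array([14.0, 18.0, 22.0])

unit_uniform = 0.5  # response per degree of uniform temperature change
unit_linear = 0.2   # response per degree of top-bottom temperature difference

temps_uniform = (temps_bottom + temps_top) / 2 - ref_temp_c
temps_linear = temps_top - temps_bottom
effect = unit_uniform * temps_uniform + unit_linear * temps_linear
print(effect)  # one combined effect value per temperature sample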
Example #11
    def run(
        self,
        expt_params: List[SimParams],
        return_parsed: bool = False,
        return_converted: bool = False,
    ):
        """Run multiple simulations and save responses.

        TODO: Change ExptParams to SimParams.

        Args:
            expt_params: ExptParams, parameters for a number of simulations.
            return_parsed: bool, for testing, return parsed responses.
            return_converted: bool, for testing, return converted responses.

        """
        # Building.
        start = timer()
        expt_params = self._build(
            c=self.c,
            expt_params=expt_params,
            fem_runner=self,
        )
        print_i(f"FEMRunner: built {self.name} model file(s) in" +
                f" {timer() - start:.2f}s")

        # Running.
        for sim_ind, _ in enumerate(expt_params):
            start = timer()
            expt_params = self._run(self.c, expt_params, self, sim_ind)
            print_i(f"FEMRunner: ran {self.name}" +
                    f" {sim_ind + 1}/{len(expt_params)}" +
                    f" simulation in {timer() - start:.2f}s")

        # Parsing.
        start = timer()
        parsed_expt_responses = self._parse(self.c, expt_params, self)
        print_i(f"FEMRunner: parsed all fem in" + f" {timer() - start:.2f}s")
        if return_parsed:
            return parsed_expt_responses
        print(parsed_expt_responses[0].keys())

        # Converting.
        start = timer()
        converted_expt_responses = self._convert(
            c=self.c,
            expt_params=expt_params,
            parsed_expt_responses=parsed_expt_responses,
        )
        print_i(f"FEMRunner: converted all fem to [Response] in" +
                f" {timer() - start:.2f}s")
        if return_converted:
            return converted_expt_responses
        print(converted_expt_responses[0].keys())

        # Saving.
        for sim_ind in converted_expt_responses:
            print_d(D, f"sim_ind = {sim_ind}")
            for response_type, responses in converted_expt_responses[
                    sim_ind].items():
                print_d(D, f"response_type in converted = {response_type}")
                print(len(responses))
                fem_responses = SimResponses(
                    c=self.c,
                    sim_params=expt_params[sim_ind],
                    sim_runner=self,
                    response_type=response_type,
                    responses=responses,
                    build=False,
                )

                start = timer()
                fem_responses.save()
                print_i(
                    f"FEMRunner: saved simulation {sim_ind + 1} SimResponses" +
                    f" ([Response]) in {timer() - start:.2f}s" +
                    f" ({response_type})")