Note
Go to the end to download the full example code.
Examining paths#
In this example we show how to examine the paths which are saved to HDF by a model run.
Imports#
%load_ext autoreload
%autoreload 2
sphinx_gallery_thumbnail_path = "../../examples/working_with_models/routed_path.png"
from pathlib import Path
import folium
import numpy as np
import pandas as pd
from polaris import Polaris
from polaris.utils.database.db_utils import read_and_close
Data Sources#
Open the demand database for analysis
# Resolve the local grid model folder and open it as a Polaris project.
model_fldr = Path("~/models/grid-small").expanduser()
project = Polaris.from_dir(Path(model_fldr))
from polaris.runs.results.h5_results import H5_Results
# iteration = model_fldr / "Grid_iteration_1_19"
# result_file = list(iteration.glob("*.h5"))[0]
# NOTE(review): this DFW path is dead — result_file is overwritten with the
# ATL result two lines below, before it is ever used.
result_file = "/home/jamie/tmp/DFW-29-Result.h5"
demand_file = "/home/jamie/models/ATL-lever-off/AtlantaMPO-Demand.sqlite"
result_file = "/home/jamie/models/ATL-lever-off/AtlantaMPO-Result.h5"
supply_file = "/home/jamie/models/ATL-lever-off/AtlantaMPO-Supply.sqlite"
# Open the HDF5 results for the ATL run.
results_atl = H5_Results(result_file)
from SVTrip.trips.polaris_reader import read_polaris_h5
from polaris.utils.database.db_utils import has_table, read_sql
# Per-link free speeds and (rounded) lengths from the supply database.
links = read_sql('Select link as link_id, fspd_ab, fspd_ba, round("length", 2) "length" from link', supply_file)
df = read_polaris_h5(demand_file, result_file, links)
def load_all_paths(dir):
    """Load all routed path links for the model run stored in *dir*.

    The demand, result and supply files are located by glob (exactly one of
    each must exist), the per-link path records are read from the HDF5
    results, and each record is annotated with its link length taken from
    the supply database.
    """
    demand_db = first_and_only(dir.glob("*Demand.sqlite"))
    results_h5 = first_and_only(dir.glob("*esult.h5*"))
    supply_db = first_and_only(dir.glob("*Supply.sqlite"))
    sql = 'Select link as link_id, fspd_ab, fspd_ba, round("length", 2) "length" from link'
    link_attrs = read_sql(sql, supply_db)
    paths = read_polaris_h5(demand_db, results_h5, link_attrs)
    return pd.merge(paths, link_attrs[["link_id", "length"]], on="link_id")
def report_stats(df, dir):
    """Compare sampled path statistics against the full-run KPIs.

    Parameters
    ----------
    df : pandas.DataFrame
        Per-link path records (e.g. from ``load_all_paths``) with at least
        ``mode_number``, ``segmentID`` and ``length`` columns.
    dir : pathlib.Path
        Model run directory used to load the reference KPIs.

    Returns a per-mode summary with sampled VMT, sampled trip counts and the
    implied sampling rates relative to the model-wide VMT/VHT KPIs.
    """
    # Select the column *before* aggregating: summing the whole frame raises
    # on non-numeric columns under pandas >= 2.0 and wastes work besides.
    df_vmt = df.groupby("mode_number")[["length"]].sum()
    df_vmt["million_VMT_sampled"] = df_vmt["length"] / 1609.0 / 1000000
    df_vmt = df_vmt.drop(columns=["length"])
    # Each trip contributes exactly one row with segmentID == 0, so counting
    # those rows counts sampled trips per mode.
    df_0 = df[df.segmentID == 0]
    df_count = df_0.groupby("mode_number")[["length"]].count()
    df_count = df_count.rename(columns={"length": "num_trips_sampled"})
    df_sampled = df_vmt.join(df_count).reset_index().rename(columns={"mode_number": "mode"})
    kpi = ResultKPIs.from_dir(dir)
    summary = kpi._add_mode_names(df_sampled).set_index("mode").join(kpi.get_kpi_value("vmt_vht"))
    summary["sample_rate_vmt"] = 100 * summary["million_VMT_sampled"] / summary["million_VMT"]
    summary["sample_rate_num_trips"] = 100 * summary["num_trips_sampled"] / summary["count"]
    return summary
# ld_vmt = vmt[0] + vmt[9]
# md_vmt = vmt[17]+vmt[18]
# print(f'ldt: {trip_count[0]+trip_count[9]}')
# print(f'mdt: {trip_count[17]+trip_count[18]}')
from polaris.analyze.result_kpis import ResultKPIs
from polaris.utils.list_utils import first_and_only

dir_dfw = Path("/home/jamie/models/DFW-lever-off")
dir_atl = Path("/home/jamie/models/ATL-lever-off")
# These loads must run before report_stats below — previously they were
# commented out, leaving df_dfw / df_atl undefined (NameError).
df_dfw = load_all_paths(dir_dfw)
df_atl = load_all_paths(dir_atl)
report_stats(df_atl, dir_atl)
# BUG FIX: the DFW dataframe was being compared against the ATL run's KPIs.
report_stats(df_dfw, dir_dfw)
# NOTE(review): the original referenced an undefined name ``results``; the
# only H5 results object created above is ``results_atl`` — used here instead.
results_atl.path_lu
df = results_atl.get_path_links()
df
# Inspect the links of one specific path.
p = df[df.path_id == 383673]
p
import matplotlib.pyplot as plt

# Spread between first and last link entry time per path (~in-vehicle time).
# Compute the groupby once instead of twice.
entering = df.groupby("path_id")["entering_time"]
diff = entering.max() - entering.min()
plt.plot(diff[0:100])
plt.hist(df.travel_time, bins=1000, range=(-200, 200))
# How many link records have negative / zero / positive travel times?
print(df[df.travel_time < 0].shape)
print(df[df.travel_time == 0].shape)
print(df[df.travel_time > 0].shape)
import scipy.io

mat = scipy.io.loadmat("/home/jamie/tmp/svtrip_output_703.mat")
foo = mat["polaris_trip135964_VehType13017"][0][0][4].transpose()
# Successive differences between samples of the SVTrip profile.
foo[1:] - foo[0:-1]
# mat.keys()
And we can plot this path on the network
# And we can plot this path on the network.
data = project.network.data_tables
with read_and_close(project.supply_file, spatial=True) as conn:
    links_layer = data.plotting_layer("Link", conn)
    loc_layer = data.plotting_layer("Location", conn)
# Use a set for O(1) membership tests in the loop below.
# NOTE(review): the original referenced undefined ``results``; ``results_atl``
# (defined near the top) is the only H5 results object in scope.
path_links = set(results_atl.get_path_links(path_id=673).link_id)
# We create a Folium layer
network_links = folium.FeatureGroup("links")
computed_path = folium.FeatureGroup("path")
locations = folium.FeatureGroup("Locations")


def _wkt_linestring_points(wkt):
    """Parse a WKT LINESTRING into [[lat, lon], ...] (note the x/y swap)."""
    body = wkt.replace("LINESTRING", "").strip(" ()")
    return [[float(y), float(x)] for x, y in (pair.split() for pair in body.split(", "))]


# We are only getting link_id and link_type into the map, but we could get
# other pieces of info as well.
for link_id, row in links_layer.iterrows():
    # Parse coordinates with float() instead of eval() — same result, without
    # executing arbitrary code found in the database geometry column.
    points = _wkt_linestring_points(row.geo)
    line = folium.vector_layers.PolyLine(points, popup=f"<b>link_id: {link_id}</b>", color="blue", weight=2)
    line.add_to(network_links)
    if link_id in path_links:
        line = folium.vector_layers.PolyLine(points, popup=f"<b>link_id: {link_id}</b>", color="red", weight=5)
        line.add_to(computed_path)
for location_id, row in loc_layer[loc_layer.index.isin([90, 740])].iterrows():
    # WKT POINT "x y" -> (lat, lon); parsed numerically instead of via eval().
    x, y = (float(v) for v in row.geo.replace("POINT", "").strip(" ()").split())
    point = (y, x)
    _ = folium.vector_layers.CircleMarker(
        point,
        popup=f"<b>Location: {location_id}</b>",
        color="black",
        radius=6,
        fill=True,
        fillColor="black",
        fillOpacity=1.0,
    ).add_to(locations)
# Center the map on the bounding box of the network layer.
a, b = network_links.get_bounds()
location = list((np.array(a) + np.array(b)) / 2)
map_osm = folium.Map(location=location, zoom_start=12)
network_links.add_to(map_osm)
computed_path.add_to(map_osm)
locations.add_to(map_osm)
folium.LayerControl().add_to(map_osm)
map_osm
# NOTE(review): notebook cell-order hazard — ``trips`` is used here but only
# defined three lines below; the read_sql cell must run first. ``iteration``
# is also undefined unless the commented-out assignment near the top of the
# file is restored — TODO confirm intended execution order.
path_trips = trips[["trip", "path", "type", "mode", "origin", "destination"]]
path_trips = path_trips[path_trips.path >= 0]
path_trips
with read_and_close(iteration / "Grid-Demand.sqlite") as conn:
    trips = pd.read_sql("SELECT * FROM Trip", conn)
trips.initial_energy_level.unique()
# Trips that were routed (path > 0) and carry EV battery bookkeeping.
ev_trips = trips[(trips.final_energy_level > 0) & (trips.path > 0)]
ev_trips = ev_trips[["trip_id", "path", "start", "end", "initial_energy_level", "final_energy_level"]]
ev_trips["duration"] = ev_trips.end - ev_trips.start
# Energy consumed over the trip = initial level minus final level.
ev_trips["consumption"] = -ev_trips.final_energy_level + ev_trips.initial_energy_level
ev_trips
# Inspect one path's link records and compare against the trip-level totals.
# NOTE(review): the original referenced undefined ``results``; using
# ``results_atl`` from the top of the file.
path_id = 433
path_links_df = results_atl.get_path_links(path_id=path_id)
print(f"travel_time = {path_links_df.travel_time.sum() / 1000.0}")
print(f"energy_consumption = {path_links_df.energy_consumption.sum() / 1000.0}")
print(f"first_link = {path_links_df.iloc[0]['link_uuid']}")
print(f"last_link = {path_links_df.iloc[-1]['link_uuid']}")
ev_trips[ev_trips.path == path_id][["origin_link", "destination_link", "duration", "consumption"]]
# path_links_df
# Result KPIs: per-timestep gap between experienced and routed travel time.
# Hoisted out of the loop — the link table does not change per timestep.
link_types = project.network.data_tables.get_table("link")[["length", "type"]].reset_index()
dfs = []
for t in results_atl.timesteps:
    df = results_atl.get_path_links_for_timestep(t)
    df = df[df["travel_time"] > 0]
    df["tt_diff"] = df.travel_time - df.routed_travel_time
    df["tt_diff_abs"] = abs(df.travel_time - df.routed_travel_time)
    df = df[["link_id", "routed_travel_time", "tt_diff", "tt_diff_abs"]]
    df = pd.merge(df, link_types, left_on="link_id", right_on="link").rename(columns={"length": "total_dist"})
    print(df.shape)
    df = df[["type", "total_dist", "routed_travel_time", "tt_diff", "tt_diff_abs"]]
    df = df.groupby("type").sum()
    display(df)
    dfs.append(df)
# Aggregate all timesteps and compute relative (signed and absolute) gaps.
df = pd.concat(dfs).groupby("type").sum()
display(df)
df["abs_gap"] = df["tt_diff_abs"] / df["routed_travel_time"]
df["gap"] = df["tt_diff"] / df["routed_travel_time"]
df
from polaris.analyze.result_kpis import ResultKPIs

# BUG FIX: the metric was referenced but never invoked (missing parentheses);
# the call at the bottom of this file shows the intended usage.
# NOTE(review): ``iteration`` must be defined (see the commented-out
# assignment near the top of the file) before this line can run.
ResultKPIs.from_dir(iteration).metric_network_gaps_by_link_type()
import seaborn as sns
import matplotlib.pyplot as plt
from polaris.analyze.kpi_comparator import KpiComparator
from polaris.analyze.result_kpis import ResultKPIs
from polaris.utils.file_utils import readlines
from polaris.utils.list_utils import first_and_only
def get_sha(iteration_dir):
    """Build a short "build-commit" label for a run from its progress log."""
    log_lines = readlines(iteration_dir / "log" / "polaris_progress.log")
    # The log contains exactly one "commit hash: ..." and one "Compiled at ..." line.
    hash_line = first_and_only([line for line in log_lines if "commit hash" in line])
    commit = hash_line.split(":")[-1].strip()
    compiled_line = first_and_only([line for line in log_lines if "Compiled at" in line])
    build = compiled_line.split(":")[-1].strip().split("-")[0]
    return f"{build}-{commit[0:6]}"
# Pick one iteration, sanity-check the build-label extraction, and create the
# comparator that accumulates runs for plotting.
iteration_dir = model_fldr / "Grid_iteration_1_23"
get_sha(iteration_dir)
k = KpiComparator()
def add_run(iteration_dir):
    """Register *iteration_dir*'s KPIs with the comparator ``k``, labelled by build/commit."""
    kpis = ResultKPIs.from_dir(iteration_dir)
    k.add_run(kpis, get_sha(iteration_dir))
# Runs to compare; most are kept commented so individual iterations can be
# toggled on and off between notebook executions.
# k.add_run(ResultKPIs.from_dir(model_fldr / "Grid_iteration_1_18"), 'Grid 1%')
# k.add_run(ResultKPIs.from_dir(model_fldr / "Grid_iteration_1_19"), 'Grid 6%')
# k.add_run(ResultKPIs.from_dir(model_fldr / "Grid_iteration_1_20"), 'Grid 25%')
k.add_run(ResultKPIs.from_dir(model_fldr / "Grid_iteration_1_22"), "Grid 0%")
# add_run(model_fldr / "Grid_iteration_1_23")
# add_run(model_fldr / "Grid_iteration_1_24")
# add_run(model_fldr / "Grid_iteration_1_25")
# add_run(model_fldr / "Grid_iteration_1_26")
# add_run(model_fldr / "Grid_iteration_1_27")
# add_run(model_fldr / "Grid_iteration_1_28")
# add_run(model_fldr / "Grid_iteration_1_29")
# add_run(model_fldr / "Grid_iteration_1_30")
# add_run(model_fldr / "Grid_iteration_1_31")
# add_run(model_fldr / "Grid_iteration_1_32")
# add_run(model_fldr / "Grid_iteration_1_33")
# add_run(model_fldr / "Grid_iteration_1_34")
# add_run(model_fldr / "Grid_iteration_1_35")
# add_run(model_fldr / "Grid_iteration_1_36")
# add_run(model_fldr / "Grid_iteration_1_37")
# add_run(model_fldr / "Grid_iteration_1_38")
# add_run(model_fldr / "Grid_iteration_1_39")
# add_run(model_fldr / "Grid_iteration_1_40")
# add_run(model_fldr / "Grid_iteration_1_41")
# add_run(model_fldr / "Grid_iteration_1_42")
# add_run(model_fldr / "Grid_iteration_1_43")
# add_run(model_fldr / "Grid_iteration_1_44")
add_run(model_fldr / "Grid_iteration_1_47")
add_run(model_fldr / "Grid_iteration_1_48")
add_run(model_fldr / "Grid_iteration_1_49")
results_dir = Path("/mnt/cfs2/ShareAndFileTransfer/ForJamie/fleet_clean/fleet_clean-Grid-develop")
# k.add_run(ResultKPIs.from_dir(results_dir / "Grid_01_abm_init_iteration"), 'Grid old')
# Compare CPU/memory profiles across the registered runs; the first call
# bypasses the KPI cache so freshly added runs are recomputed, the second
# plots from cache.
k.plot_cpu_mem(skip_cache=True)
k.plot_cpu_mem()
# Gap metric invoked as a call here (the correct form — note the parentheses).
ResultKPIs.from_dir(model_fldr / "Grid_iteration_1_20").metric_network_gaps_by_link_type()