Categories
R

Opening a spatial subset with {sf}

Intersecting an area of interest with a layer at opening time

Days 3 and 4 of the 30DayMapChallenge: "polygons" and "green" (previously).

The CORINE Land Cover dataset is distributed as a geopackage weighing more than 8 GB. To limit memory use when we only work on a subset, we can clip it at opening time. Here we will map the island of Cyprus:

library(dplyr)
library(ggplot2)
library(stringr)
library(sf)
library(rnaturalearth)
library(glue)

# Using the contour of Cyprus (enlarged) from naturalearth to clip
bb <- ne_countries(scale = "medium", country = "cyprus", returnclass = "sf") %>% 
  st_transform("EPSG:3035") %>% 
  st_buffer(90000) %>% 
  pull(geometry) %>% 
  st_as_text()

# Corine Land Cover 
# download from https://land.copernicus.eu/pan-european/corine-land-cover/clc2018
# (registration required)
# passing the bounding area
cyprus_clc <- read_sf("data/U2018_CLC2018_V2020_20u1.gpkg", query = glue("
  SELECT * 
  FROM U2018_CLC2018_V2020_20u1
  WHERE st_intersects(Shape, st_polygonfromtext('{bb}'))"))
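# Note: recent versions of {sf} can apply the same spatial filter without
# hand-written SQL, via the wkt_filter argument (an equivalent sketch; it
# keeps the features intersecting the given WKT geometry):
# cyprus_clc <- read_sf("data/U2018_CLC2018_V2020_20u1.gpkg", wkt_filter = bb)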

legend_colors <- list("TRUE"  = "mediumaquamarine",
                      "FALSE" = "grey90")

legend_labs <- list("TRUE"  = "forest",
                    "FALSE" = "other")

# exclude sea (code 523)
# and classify on forest (codes 3xx)
cyprus_clc %>% 
  filter(Code_18 != "523") %>% 
  ggplot() +
  geom_sf(aes(fill = str_detect(Code_18, "^3"),
              color = str_detect(Code_18, "^3"))) +
  scale_fill_manual(name = "type",
                    values = legend_colors,
                    labels = legend_labs) +
  scale_color_manual(name = "type",
                     values = legend_colors,
                     labels = legend_labs) +
  labs(title = "Cyprus",
       subtitle = "Landcover",
       caption = glue("data: Copernicus CLC 2018
                      projection LAEA
                      r.iresmi.net {Sys.Date()}")) +
  theme_minimal() +
  theme(legend.position = "bottom",
        plot.caption = element_text(size = 7))

ggsave("cyprus.png", width = 20, height = 12.36, units = "cm", scale = 1.1, type = "cairo")
Cyprus landcover
Categories
R

My air travel carbon footprint

I shouldn’t have

library(tidyverse)
library(sf)
library(glue)
library(rnaturalearth)
library(units)

# grams of carbon dioxide-equivalents per passenger kilometer
# https://en.wikipedia.org/wiki/Fuel_economy_in_aircraft
co2_eq <- set_units(88, g/km)

# countries map from Naturalearth
countries <- ne_countries(scale = "small", returnclass = "sf")

# airport code and coordinates to geolocate itineraries
airport <- read_csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat",
                    col_names = c("airport",
                                  "name",
                                  "city",
                                  "country",
                                  "iata",
                                  "icao",
                                  "latitude",
                                  "longitude",
                                  "altitude",
                                  "timezone",
                                  "dst",
                                  "tz",
                                  "type",
                                  "source")) %>% 
  # Add Kai Tak, missing from the airport data
  add_row(iata = "HKGX",
          name = "Kai Tak", 
          city = "Hong Kong",
          latitude = 22.328611,
          longitude = 114.194167)

# itineraries
flight <- read_delim("from-to
LYS-LHR
LHR-LYS
LYS-BOD
LYS-BOD
LYS-BOD
LYS-BOD
BOD-LYS
BOD-LYS
BOD-LYS
LYS-BOD
BOD-LYS
BOD-LGW
LHR-JNB
CPT-JNB
JNB-LHR
LHR-ORY
BOD-ORY
CDG-HKGX
HKGX-PER
SYD-HKGX
HKGX-CDG
ORY-CAY
CAY-BEL
BEL-BSB
BSB-MAO
MAO-VVI
VVI-LPB
LPB-MAO
MAO-BEL
BEL-CAY
CAY-XAU
XAU-CAY
CAY-XAU
XAU-CAY
CAY-XAU
XAU-CAY
CAY-ORY
NCE-MXP
MXP-NCE
CDG-CAY
CAY-MPY
MPY-CAY
CAY-CDG
CDG-HKG
HKG-SYD
SYD-HKG
HKG-SYD
TLN-ORY
CDG-CPH
CPH-ORY
ORY-TLN
CDG-YYZ
YYZ-SFO
SFO-YYZ
YYZ-CDG
ORY-TLN
TLN-ORY
LYS-AMS
AMS-SHJ
SHJ-KTM
KTM-SHJ
SHJ-AMS
AMS-LYS
CDG-AUH
AUH-MCT
MCT-KTM
KTM-PKR
PKR-KTM
KTM-MCT
MCT-AUH
AUH-CDG
GVA-FCO
FCO-GVA
CDG-RUN
RUN-CDG
GVA-KEF
KEF-GVA
CDG-ARN
ARN-KRN
KRN-ARN
ARN-CDG
CDG-RUN
RUN-CDG", delim = "-")

# geolocate
flight_geo <- flight %>% 
  left_join(airport, by = c("from" = "iata")) %>% 
  left_join(airport, by = c("to" = "iata"), suffix = c("_from", "_to"))

# create lines
flight_lines <- flight_geo %>% 
  mutate(line = glue("LINESTRING ({longitude_from} {latitude_from}, {longitude_to} {latitude_to})")) %>% 
  st_as_sf(wkt = "line", crs = "EPSG:4326")

# create great circles and compute costs
flight_geo_gc <- flight_lines %>% 
  st_segmentize(set_units(100, km)) %>% 
  mutate(distance = set_units(st_length(line), km),
         co2 = set_units(distance * co2_eq, t))
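
# quick sanity check of the unit arithmetic (a sketch; the 750 km figure is
# illustrative, not taken from the flight list): km cancels out, leaving a mass
set_units(set_units(750, km) * co2_eq, kg)
#> 66 [kg]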

# totals
total_flight <- flight_geo_gc %>% 
  st_drop_geometry() %>% 
  summarise(total_distance = sum(distance, na.rm = TRUE),
            total_co2 = sum(co2, na.rm = TRUE))

# map
ggplot() +
  geom_sf(data = countries, fill = "lightgrey", color = "lightgrey") +
  geom_sf(data = flight_geo_gc, color = "red") + 
  # geom_sf(data = flight_lines, color = "blue") + 
  coord_sf(crs = "+proj=eqearth") +
  # coord_sf(crs = "+proj=robin") +
  # coord_sf(crs = "+proj=fouc") +
  # coord_sf(crs = "+proj=eck1") +
  # coord_sf(crs = "+proj=moll") +
  # coord_sf(crs = "+proj=bonne +lat_1=10") +
  # coord_sf(crs = "+proj=laea") +
  labs(title = "My air travel carbon footprint 1993-2020",
       subtitle = glue("{round(total_flight$total_distance, -2)} km - {round(total_flight$total_co2, 1)} teqCO₂")) +
  theme_minimal()
map of flights
Flygskam (equal-earth projection)

Categories
R

Extract POIs from a Suunto watch

Suunto watches (Spartan, Suunto 9, …) can record waypoints (POIs), but although these can be visualized in the Suunto app (or on the watch), they cannot be exported for use with other tools. It used to be possible to access them from the Movescount website, but it was discontinued a few months ago.

So we have to use some black magic tricks to extract them:

  • Make sure the Suunto app has storage permission.
  • Go to settings and tap the version number many times; logging will start.
  • Go back to home and pull to refresh the feed.
  • Wait a bit.
  • Go to settings again and tap the version number many times; logging will stop.
  • On your Android internal storage there will now be a folder called stt.

Actually, with the current app version, we get an SQLite database in:

android > data > com.stt.android.suunto > files > stt-dump > amer_app.db

Now it’s just a matter of copying the file (via USB, Bluetooth, etc.) to a PC, connecting to the database and finding where the POIs are stored (in a table called… pois); we can use QGIS or R for that…
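If the table name ever changes, {DBI} can list what the database contains before querying (a minimal sketch, assuming the file was copied to the home directory):

library(DBI)
library(RSQLite)

cnx <- dbConnect(SQLite(), "~/amer_app.db")
dbListTables(cnx)           # look for a table named "pois"
dbListFields(cnx, "pois")   # check columns: longitude, latitude, creation, ...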

With R, we can select this year’s POIs (dates are stored as UNIX timestamps) and export them as GeoJSON:

library(RSQLite)
library(dplyr)
library(lubridate)
library(sf)
library(leaflet)

cnx <- dbConnect(SQLite(), "~/amer_app.db")

poi <- dbGetQuery(cnx, "SELECT * FROM pois") %>% 
  # creation is stored as a UNIX timestamp; compare numerically
  filter(creation >= as.numeric(format(ymd("2022-01-01"), "%s"))) %>% 
  st_as_sf(coords = c("longitude", "latitude"), crs = "EPSG:4326")

poi %>% 
  st_write("~/pois.geojson")

Or we can easily map them:

poi %>% 
  leaflet() %>% 
  addCircleMarkers() %>% 
  addTiles()
Categories
R

Simplifying polygons layers

The current 2021 French administrative limits database (ADMIN EXPRESS from IGN) is more detailed than the original version (from 50 MB zipped in 2017 to 500 MB zipped now), its geometry now being based on the BD TOPO. However, we don’t always need large-scale details, especially for web applications. The commune layer alone is a huge 400 MB shapefile, not really usable in a small-scale Leaflet map, for example. Even the lighter version (ADMIN EXPRESS COG CARTO) is 120 MB for the commune layer.

Using sf::st_simplify() in R, or a similar command in QGIS, on these shapefiles would create holes or overlapping polygons, since shapefiles are not topologically aware. We could probably convert to lines, build the topology, simplify, clean, and rebuild the polygons in GRASS or ArcGIS, but it’s quite a hassle…
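For illustration, the naive approach would be something like this (a sketch, assuming the commune shapefile; each polygon is simplified independently, so shared borders drift apart):

library(sf)

com <- read_sf("COMMUNE.shp")
# preserveTopology only preserves topology within each feature, not between
# features: neighbouring communes end up with mismatched borders, leaving
# slivers and overlaps
com_broken <- st_simplify(com, dTolerance = 1000, preserveTopology = TRUE)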

A nice solution is using Mapshaper on mapshaper.org, or better for reproducibility, using {rmapshaper} in R. For such a large dataset it is advisable to use a Node.js install instead of relying on the package’s embedded version.

In red the original; in black the simplified version, with départements in bold.

On Debian-like systems:

> sudo apt-get install nodejs npm

Or on Windows: install https://nodejs.org/. If needed, add C:\Users\xxxxxxxx\AppData\Roaming\npm to your PATH.

> npm config set proxy "http://login:password@proxy:8080" # if necessary
> npm install -g mapshaper

For ms_simplify() we will set sys = TRUE to take advantage of the Node.js executable. Experiment with the other parameters to get a resolution that suits you. Here we use the Visvalingam algorithm keeping 3% of the points, squeezing the commune layer from 400 MB to 30 MB. From there we rebuild the departement, region and epci layers with ms_dissolve() calls. Then we join the original attributes back and export everything to a geopackage with some metadata.

library(tidyverse)
library(sf)
library(rmapshaper)
library(geojsonio)
library(janitor)
library(fs)

# ADMIN EXPRESS COG France entière édition 2021 (in WGS84)
# ftp://Admin_Express_ext:Dahnoh0eigheeFok@ftp3.ign.fr/ADMIN-EXPRESS-COG_3-0__SHP__FRA_WM_2021-05-19.7z
# also available on :
# http://files.opendatarchives.fr/professionnels.ign.fr/adminexpress/ADMIN-EXPRESS-COG_3-0__SHP__FRA_WM_2021-05-19.7z


# originals ---------------------------------------------------------------

source_ign <- "~/sig/ADMINEXPRESS/ADMIN-EXPRESS-COG_3-0__SHP__FRA_2021-05-19/ADMIN-EXPRESS-COG/1_DONNEES_LIVRAISON_2021-05-19/ADECOG_3-0_SHP_WGS84G_FRA"

com <- source_ign %>% 
  path("COMMUNE.shp") %>% 
  read_sf() %>% 
  clean_names()

dep <- source_ign %>% 
  path("DEPARTEMENT.shp") %>% 
  read_sf() %>% 
  clean_names()

reg <- source_ign %>% 
  path("REGION.SHP") %>% 
  read_sf() %>% 
  clean_names()

epci <- source_ign %>% 
  path("EPCI.shp") %>% 
  read_sf() %>% 
  clean_names()

# simplify ---------------------------------------------------------------

check_sys_mapshaper()

# 6 min
# using a conversion to geojson_json to avoid encoding problems
com_simpl <- com %>%
  geojson_json(lat = "lat", lon = "long", group = "INSEE_COM", geometry = "polygon", precision = 6) %>%
  ms_simplify(keep = 0.03, method = "vis", keep_shapes = TRUE, sys = TRUE)

dep_simpl <- com_simpl %>% 
  ms_dissolve(field = "insee_dep", sys = TRUE)

reg_simpl <- com_simpl %>% 
  ms_dissolve(field = "insee_reg", sys = TRUE)

epci_simpl <- com_simpl %>% 
  ms_dissolve(field = "siren_epci", sys = TRUE)


# add attributes and export ----------------------------------------------

destination  <- "~/donnees/ign/adminexpress_simpl.gpkg"

com_simpl %>% 
  geojson_sf() %>% 
  st_write(destination, layer = "commune",
           layer_options = c("IDENTIFIER=Communes Adminexpress 2021 simplifiées",
                             "DESCRIPTION=France WGS84 version COG (2021-05). Simplification mapshaper."))

dep_simpl %>% 
  geojson_sf() %>% 
  left_join(st_drop_geometry(dep), by = "insee_dep") %>% 
  st_write(destination, layer = "departement",
           layer_options = c("IDENTIFIER=Départements Adminexpress 2021 simplifiés",
                             "DESCRIPTION=France WGS84 version COG (2021-05). Simplification mapshaper."))

reg_simpl %>% 
  geojson_sf() %>% 
  left_join(st_drop_geometry(reg), by = "insee_reg") %>% 
  st_write(destination, layer = "region",
           layer_options = c("IDENTIFIER=Régions Adminexpress 2021 simplifiées",
                             "DESCRIPTION=France WGS84 version COG (2021-05). Simplification mapshaper."))

epci_simpl %>% 
  geojson_sf() %>% 
  mutate(siren_epci = str_remove(siren_epci, "200054781/")) %>% # remove Grand Paris
  left_join(st_drop_geometry(epci), by = c("siren_epci" = "code_siren")) %>% 
  st_write(destination, layer = "epci",
           layer_options = c("IDENTIFIER=EPCI Adminexpress 2021 simplifiés",
                             "DESCRIPTION=Établissement public de coopération intercommunale France WGS84 version COG (2021-05). Simplification mapshaper."))
  

Download the geopackage (simplified communes, EPCI, départements and régions, 2021-05) in WGS84 (EPSG:4326) – 22 MB

Categories
R

Does 100 m equal 1 km?

Climbing or not climbing?

Photo: Alexis Martín

In trail running and orienteering, people say that running 100 m of elevation up and down takes the same time as running 1000 m on the flat. A similar old rule of thumb exists, and more recently some researchers (Davey, Hayes & Norman, 1994; Scarf, 2007) added a little science to this vernacular knowledge, but with few data points (2 athletes and 300 race results, respectively).

Could we add some modern, massive data to check this saying? Using our dataset scraped from ITRA, with 16,949 race results from 2,802 runners, we can fit a basic nonlinear model to estimate the parameters:

> (model <- results %>% 
+    nls(heures ~ (dist_tot + deniv_tot / k) / v, data = .,
+        start = list(k = 100, v = 8)))

Nonlinear regression model
  model: heures ~ (dist_tot + deniv_tot/k)/v
   data: .
     k      v 
87.239  9.565 
 residual sum-of-squares: 313542

Number of iterations to convergence: 3 
Achieved convergence tolerance: 1.292e-06

> confint(model)

Waiting for profiling to be done...
       2.5%     97.5%
k 81.672165 93.296957
v  9.332102  9.809812

So we see that the average flat speed sustainable over a long effort in our sample (which is biased towards elite runners) is around 9.6 km⋅h⁻¹, and that 1 km on the flat is equivalent to 87 m [82–93] of height gain, not far from the old 100 m. Of course these values will vary according to the athlete’s shape, the total race length and profile, and many other parameters…
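With the fitted parameters we can also compute the equivalent flat distance of a course; for example (a sketch using the point estimates above; the 40 km and 2,500 m figures are made up):

k <- coef(model)["k"]        # ~87 m of climb per equivalent flat km
v <- coef(model)["v"]        # ~9.6 km/h sustained flat speed
(flat_km <- 40 + 2500 / k)   # ~69 equivalent flat km
flat_km / v                  # ~7.2 hours predicted for our elite-biased sample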

References

Davey, R. C., Hayes, M., & Norman, J. M. (1994). Running uphill: An experimental result and its applications. The Journal of the Operational Research Society, 45(1), 25. https://doi.org/10.2307/2583947

Scarf, P. (2007). Route choice in mountain navigation, Naismith’s rule, and the equivalence of distance and climb. Journal of Sports Sciences, 25(6), 719‑726. https://doi.org/10.1080/02640410600874906

Categories
R

Playing with Roman numerals

What is the longest year number (yet) written in Roman numerals?

library(tidyverse)

tibble(y = 1:2020) %>%
  mutate(r = as.roman(y),
         l = str_length(r)) %>%
  slice_max(l)
# A tibble: 1 x 3
      y r                 l
  <int> <roman>       <int>
1  1888 MDCCCLXXXVIII    13

It is year 1888, with 13 characters…

And the largest writable number is 3899, according to the strict rules implemented in R (though some say it’s 3999).
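We can check where as.roman() gives up; this NA is exactly what the filter(lead(is.na(l))) trick below exploits:

as.roman(c(3899, 3900))
#> [1] MMMDCCCXCIX <NA>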

tibble(y = 1:5000) %>%
	mutate(r = as.roman(y),
	       l = str_length(r)) %>%
	filter(lead(is.na(l))) %>% 
	slice_min(l)
# A tibble: 1 x 3
      y r               l
  <int> <roman>     <int>
1  3899 MMMDCCCXCIX    11

So the longest year overall will be 3888, with 15 characters:

tibble(y = 1:3899) %>%
	mutate(r = as.roman(y),
	       l = str_length(r)) %>%
	slice_max(l)
# A tibble: 1 x 3
      y r                   l
  <int> <roman>         <int>
1  3888 MMMDCCCLXXXVIII    15

Nice pattern:

tibble(y = 1:3899) %>%
	mutate(r = as.roman(y),
	       l = str_length(r)) %>%
	ggplot(aes(y, l)) +
	# geom_col(width = 1) +
	geom_point(alpha = .2) +
	# geom_line(alpha = .5) +
	geom_smooth() +
	labs(title = "Character length of Roman numeral years",
		 x = "year",
		 y = "characters")
Character length of Roman numeral years

And there are only eleven palindromic years:

tibble(y = 1:3899) %>%
	mutate(r = as.character(as.roman(y)),
	       rr = stringi::stri_reverse(r)) %>% 
	filter(r == rr,
	       str_length(r) > 1)
# A tibble: 11 x 3
       y r     rr   
   <int> <chr> <chr>
 1     2 II    II   
 2     3 III   III  
 3    19 XIX   XIX  
 4    20 XX    XX   
 5    30 XXX   XXX  
 6   190 CXC   CXC  
 7   200 CC    CC   
 8   300 CCC   CCC  
 9  1900 MCM   MCM  
10  2000 MM    MM   
11  3000 MMM   MMM  
Categories
R

Trail running: is it worth starting?

Should I stay or should I go?

The Clash

https://www.flickr.com/photos/alexismarod/33815546074/

Trail running can be hard (Malliaropoulos, Mertyri and Tsaklis 2015; Hespanhol Junior, van Mechelen and Verhagen 2017; Istvan et al. 2019; Messier et al. 2018). During one race, I wondered whether we could avoid the race completely and predict the race time from distance, elevation change, sex and performance index (so yes, we still have to run sometimes, to measure that index).

We need a dataset to play with. ITRA maintains a quite comprehensive database (International Trail Running Association 2020), but no current dataset is openly available. However, we can use the hidden JSON API to scrape a sample. I created an R package, {itra}, for this purpose; with it we can get some runners and their results:

NOTE (2021-09): the ITRA website has changed, so the package no longer works.

remotes::install_gitlab("mdagr/itra")
library(itra)
library(tidyverse)
library(glue)
library(hms)
library(lubridate)
library(gghighlight)
library(progress)

A test:

itra_runners("jornet")
#> # A tibble: 5 x 15
#>   id_coureur     id  cote nom   prenom quartz team      M L     XL      XXL
#>        <int>  <int> <int> <chr> <chr>   <int> <chr> <int> <lgl> <lgl> <int>
#> 1       2704 2.70e3   950 JORN… Kilian     20 ""      898 NA    NA      896
#> 2     707322 7.07e5   538 RENU… Arturo      0 ""       NA NA    NA       NA
#> 3    2667939 2.67e6   520 PARE… Samuel      0 ""       NA NA    NA       NA
#> 4    2673933 2.67e6   468 PÉRE… Anna        0 ""       NA NA    NA       NA
#> 5    2673935 2.67e6   468 RIUS… Sergi       0 ""       NA NA    NA       NA
#> # … with 4 more variables: S <int>, XS <int>, XXS <lgl>, palmares <chr>

It works…

Get the data

So we’re going to build a data frame of single letters and random page indices to create our dataset of runners. I manually add some low indices to oversample the elite athletes; otherwise we would probably miss them entirely.

# helpers -----------------------------------------------------------------

# compute the mode (to find the runner sex from its details ; see below)
stat_mode <- function(x, na.rm = FALSE) {
  if(na.rm) {
    x <- x[!is.na(x)]
  }
  
  ux <- unique(x)
  ux[which.max(tabulate(match(x, ux)))]
}
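
# quick sanity check of the helper (illustrative values):
# stat_mode(c("H", "H", "F", NA), na.rm = TRUE) returns "H"
# (ties are resolved in favour of the value seen first)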


# sample -------------------------------------------------------------

# progress bar versions of ITRA functions (scraping will be slow, we want to know if it works)
pb_itra_runners <- function(...) {
  pb$tick()
  itra_runners(...)
}

pb_itra_runner_results <- function(...) {
  pb$tick()
  itra_runner_results(...)
}

pb_itra_runner_details <- function(...) {
  pb$tick()
  itra_runner_details(...)
}

# dataset generation from a "random" set of indices
#
oversample_high_score <- c(0, 50, 100, 250, 500)
my_sample <- tibble(l = letters,
                 n = c(oversample_high_score, 
                       sample(0:100000,
                              length(letters) - length(oversample_high_score)))) %>% 
  expand(l, n) %>% 
  distinct(l, n) 

We have 676 (letter, index) tuples. We can now start querying the website; we should get 5 runners (by default) for each tuple. But we will have some duplicates because, for example, at index 0 we will get "Kilian Jornet" for the letters "k", "i" and "l"…

I use purrr::map2() to iterate along our sample, wrapping the calls in slowly() to avoid overloading the website and in possibly() to get NA instead of errors stopping the whole process.

You’ll see that the JSON variables have a mix of French and English names!

#  querying runners --------

pb <- progress_bar$new(format = "[:bar] :current/:total remaining: :eta",
             total = nrow(my_sample))

my_dataset <- my_sample %>% 
  mutate(runners = map2(l, n, possibly(slowly(pb_itra_runners, rate_delay(.5)), NA)))

# runners list
runners <- my_dataset %>% 
  drop_na(runners) %>% 
  unnest(runners) %>% 
  distinct(id_coureur, .keep_all = TRUE) %>% 
  select(-l, -n)

Here, instead of the nominal 3,380 runners, I get only 2,796…

Then we can query for their race results and their sex.

pb <- progress_bar$new(format = "[:bar] :current/:total remaining: :eta",
                       total = nrow(runners))
results <- runners %>% 
  select(id, first_name = prenom, last_name = nom) %>% 
  mutate(runner_results = pmap(., possibly(slowly(pb_itra_runner_results, rate_delay(.5)), NA))) %>% 
  unnest(runner_results)

# performance index by trail category and sex
pb <- progress_bar$new(format = "[:bar] :current/:total remaining: :eta",
             total = nrow(runners))
runners_details <- runners %>% 
  select(id) %>% 
  mutate(details = map(id, possibly(slowly(pb_itra_runner_details, rate_delay(.5)), NA))) %>% 
  unnest(details)

Some preprocessing is necessary. Sex is not a direct property of the runners; we must get it from runners_details. We then join our three data frames and compute some new variables.

Levels come from:

https://itra.run/content/faq-indice-perfomance

# preprocessing ----
runners_sex <- runners_details %>% 
  group_by(id) %>% 
  summarise(sex = stat_mode(sexe, na.rm = TRUE))

results_clean <- results %>% 
  rename(cote_course = cote) %>% 
  left_join(select(runners, -nom, -prenom), by = "id") %>% 
  left_join(runners_sex, by = "id") %>% 
  mutate(dist_tot = as.numeric(dist_tot),
       dt = ymd(dt),
       time = lubridate::hms(temps),
       hours = as.numeric(time, "hours"),
       kme = dist_tot + deniv_tot / 100,
       level = factor(
                  case_when(
                   (cote > 825 & sex == "H") | (cote > 700 & sex == "F") ~ "elite",
                   (cote > 750 & sex == "H") | (cote > 625 & sex == "F") ~ "expert",
                   (cote > 650 & sex == "H") | (cote > 550 & sex == "F") ~ "advanced",
                   (cote > 500 & sex == "H") | (cote > 475 & sex == "F") ~ "strong",
                   (cote > 350 & sex == "H") | (cote > 350 & sex == "F") ~ "intermediate",
                   is.na(sex) ~ NA_character_,
                   TRUE ~ "novice"), 
                 levels = c("novice", "intermediate", "strong", "advanced", "expert", "elite")))
  

Exploratory analysis

Having a quick look at the dataset…

# analysis -----------------------------------------------------------------

# runners' ITRA performance index
runners %>% 
  left_join(runners_sex) %>% 
  ggplot(aes(cote, fill = sex)) +
    geom_histogram(binwidth = 10)

Most athletes have an ITRA score between 300 and 700. We can also see the effect of our oversampling, with many athletes added above 700. We can also notice that trail running is sadly a male (H) dominated sport.
# distance ran
results_clean %>% 
  ggplot(aes(dist_tot)) +
    geom_histogram(binwidth = 2)

We can see the common distances at 25, 50, 100 and 160 km (100 miles).
# times ran
results_clean %>% 
  ggplot(aes(hours)) +
    geom_histogram(binwidth = 1 / 6)

Most race times are around 4 hours.
# by level
results_clean %>% 
  drop_na(level) %>% 
  filter(hours != 0) %>% 
  ggplot(aes(kme, hours, color = level)) +
    geom_point(alpha = .3) +
    gghighlight() +
    coord_cartesian(xlim = c(0, 320), ylim = c(0, 65)) +
    facet_wrap(~ level)

Novices are slow and don’t run long races; elites are fast from short races to ultra-trails.

Prediction

Now, back to my initial question: can I just predict my race time and avoid a long stretch of pain by not running?

I will use a random forest. Since the rows are not independent (we have several results per runner), I use a grouped k-fold cross-validation on the runner id.

library(caret)
library(ranger)

results_rf <- results_clean %>% 
	drop_na(hours, cote)
	
# grouped CV folds
group_fit_control <- trainControl(
	index = groupKFold(results_rf$id, k = 10),
	method = "cv")

rf_fit <- train(hours ~ dist_tot + deniv_tot + deniv_neg_tot + cote + sex, 
				data = results_rf, 
				method = "rf",
				trControl = group_fit_control)
rf_fit
Random Forest 

18339 samples
    5 predictors

No pre-processing
Resampling: Cross-Validated (10 fold) 
Summary of sample sizes: 16649, 16704, 16338, 16524, 16407, 16223, ... 
Resampling results across tuning parameters:

  mtry  splitrule   RMSE      Rsquared   MAE      
  2     variance    1.822388  0.9472348  0.8747978
  2     extratrees  1.844228  0.9463288  0.8810525
  3     variance    1.827556  0.9469227  0.8801065
  3     extratrees  1.847274  0.9459055  0.8800262
  5     variance    1.873645  0.9440697  0.9021319
  5     extratrees  1.853316  0.9455854  0.8882530

Tuning parameter 'min.node.size' was held constant at a value of 5
RMSE was used to select the optimal model using the smallest value.
The final values used for the model were mtry = 2, splitrule = variance
 and min.node.size = 5.

So with a root mean square error of 1.82 hours, I can’t say that my model is super precise. However, in my case, my last 16 km race (01:40:00) is pretty well predicted!

predict(rf_fit, newdata = tibble(dist_tot = 16,
								 deniv_tot = 600,
								 deniv_neg_tot = 600,
								 cote = 518,
								 sex = "H")) %>% 
	magrittr::multiply_by(3600) %>% 
	floor() %>% 
	seconds_to_period() %>% 
	hms::hms()
# 01:37:42

So next time I’ll just run the model instead of running the race.

I’ll leave the tuning to the specialists. While writing this post I found another potential, similar use for the {itra} package (Fogliato, Oliveira and Yurko 2020).

References

  • Fogliato, Riccardo, Natalia L. Oliveira and Ronald Yurko. 2020. "TRAP: A Predictive Framework for Trail Running Assessment of Performance". arXiv:2002.01328 [stat], 12 July 2020. http://arxiv.org/abs/2002.01328.
  • Hespanhol Junior, Luiz Carlos, Willem van Mechelen and Evert Verhagen. 2017. "Health and Economic Burden of Running-Related Injuries in Dutch Trailrunners: A Prospective Cohort Study". Sports Medicine 47 (2): 367‑77. https://doi.org/10.1007/s40279-016-0551-8.
  • International Trail Running Association. "ITRA", 2020. https://itra.run/.
  • Istvan, Albertus Oosthuizen, Paul Yvonne, Jeremy Ellapen Terry, Barnard Marco, Bongani Qumbu Timothy, Valery Hammill Henriëtte and Lukas Strydom Gert. 2019. "Common ultramarathon trail running injuries and illnesses: A review (2007-2016)". International Journal of Medicine and Medical Sciences 11 (4): 36‑42. https://doi.org/10.5897/IJMMS2018.1386.
  • Malliaropoulos, Nikolaos, Dimitra Mertyri and Panagiotis Tsaklis. 2015. "Prevalence of Injury in Ultra Trail Running". Human Movement 16 (2). https://doi.org/10.1515/humo-2015-0026.
  • Messier, Stephen P., David F. Martin, Shannon L. Mihalko, Edward Ip, Paul DeVita, D. Wayne Cannon, Monica Love et al. 2018. "A 2-Year Prospective Cohort Study of Overuse Running Injuries: The Runners and Injury Longitudinal Study (TRAILS)". The American Journal of Sports Medicine 46 (9): 2211‑21. https://doi.org/10.1177/0363546518773755.
Categories
R

Using the geofacet package to spatially arrange plots

The {geofacet} package allows you to "arrange a sequence of plots of data for different geographical entities into a grid that strives to preserve some of the original geographical orientation of the entities".

As in the previous post, it’s interesting if you view each entity as a unit and don’t care about its real size or weight, and don’t want to spend too much time manually finding the best grid.

We will again use the same COVID-19 dataset. We manually add the overseas départements once we have found the right grid (by trying different seeds) and adjust Corsica’s position.

COVID-19 deaths in hospital, by département, per 100,000 inhab.

# packages ----------------------------------------------------------------
library(tidyverse)
library(httr)
library(fs)
library(sf)
library(readxl)
library(janitor)
library(glue)
library(geofacet)
# also install ragg

# sources -----------------------------------------------------------------

# https://www.data.gouv.fr/fr/datasets/donnees-hospitalieres-relatives-a-lepidemie-de-covid-19/
fichier_covid <- "donnees/covid.csv"
url_donnees_covid <- "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"

# https://www.insee.fr/fr/statistiques/2012713#tableau-TCRD_004_tab1_departements
fichier_pop <- "donnees/pop.xls"
url_donnees_pop <- "https://www.insee.fr/fr/statistiques/fichier/2012713/TCRD_004.xls"

# Admin Express: download manually
# https://geoservices.ign.fr/documentation/diffusion/telechargement-donnees-libres.html#admin-express
aex <- path_expand("~/Downloads/ADMIN-EXPRESS_2-2__SHP__FRA_2020-02-24/ADMIN-EXPRESS/1_DONNEES_LIVRAISON_2020-02-24")


# config ------------------------------------------------------------------

options(scipen = 999)

force_download <- FALSE # re-download even if the file exists and was downloaded today?


# download -------------------------------------------------------

if (!dir_exists("donnees")) dir_create("donnees")
if (!dir_exists("resultats")) dir_create("resultats")
if (!dir_exists("resultats/animation_spf")) dir_create("resultats/animation_spf")

if (!file_exists(fichier_covid) |
    file_info(fichier_covid)$modification_time < Sys.Date() |
    force_download) {
  GET(url_donnees_covid,
      progress(),
      write_disk(fichier_covid, overwrite = TRUE)) %>%
    stop_for_status()
}

if (!file_exists(fichier_pop)) {
  GET(url_donnees_pop,
      progress(),
      write_disk(fichier_pop)) %>%
    stop_for_status()
}

covid <- read_csv2(fichier_covid)

pop <- read_xls(fichier_pop, skip = 2) %>%
  clean_names()

# pre-downloaded Admin Express
dep <- read_sf(path(aex, "ADE_2-2_SHP_LAMB93_FR/DEPARTEMENT.shp")) %>%
  clean_names() %>%
  st_set_crs(2154)


# building the grid ------------------------------------------------

grid_fr <- dep %>%
  select(insee_dep, nom_dep) %>%
  grid_auto(names = "nom_dep", codes = "insee_dep", seed = 4) %>%
  add_row(row = 8,
          col = 1,
          name_nom_dep = "Guadeloupe",
          code_insee_dep = "971") %>%
  add_row(row = 9,
          col = 1,
          name_nom_dep = "Martinique",
          code_insee_dep = "972") %>%
  add_row(row = 10,
          col = 1,
          name_nom_dep = "Guyane",
          code_insee_dep = "973") %>%
  add_row(row = 7,
          col = 13,
          name_nom_dep = "Mayotte",
          code_insee_dep = "976") %>%
  add_row(row = 8,
          col = 13,
          name_nom_dep = "La Réunion",
          code_insee_dep = "974")

grid_fr[grid_fr$code_insee_dep %in% c("2A", "2B"), "col"] <- 13
grid_fr[grid_fr$code_insee_dep %in% c("2A", "2B"), "row"] <- grid_fr[grid_fr$code_insee_dep %in% c("2A", "2B"), "row"] - 1
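
# optional check (not in the original script): {geofacet} can draw the grid
# skeleton, handy to eyeball the layout before the full plot
# grid_preview(grid_fr)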


# plot ----------------------------------------------------------

df <- covid %>%
  filter(sexe == 0) %>%
  rename(deces = dc,
         reanim = rea,
         hospit = hosp) %>%
  left_join(pop,
            by = c("dep" = "x1")) %>%
  mutate(incidence = deces / x2020_p * 100000) %>%
  rename(insee_dep = dep) %>%
  left_join(grid_fr %>%
              select(nom_dep = name_nom_dep,
                     insee_dep = code_insee_dep)) %>%
  drop_na(insee_dep) %>%
  ggplot(aes(jour, incidence)) +
    geom_area() +
    facet_geo(~ nom_dep, grid = grid_fr) +
    labs(title = "Mortalité",
       subtitle = "COVID-19 - France",
       x = "date",
       y = "décès pour\n100 000 hab.",
       caption = glue("http://r.iresmi.net/\ndonnées SPF {Sys.Date()}")) +
    theme_minimal() +
    theme(strip.text = element_text(hjust = 0, size = 7))

ggsave(glue("resultats/covid_fr_mortalite_geofacette_{Sys.Date()}.png"),
       width = 25, height = 20, units = "cm", scaling = .8, res = 300, device = ragg::agg_png)

Categories
R

Generalized Monty Hall problem

A simulation of the Monty Hall problem outcomes for n doors (k opened) à la Tidyverse…

library(tidyverse)

# sample safely from vectors, whether they have one or more elements
resample <- function(x, ...) x[sample.int(length(x), ...)]
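# why resample() instead of sample(): with a length-one numeric vector,
# sample(x) treats x as an upper bound and draws from 1:x, e.g.
# sample(c(10), 1)    # may return any of 1..10
# resample(c(10), 1)  # always returns 10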

monty <- function(doors = 3, monty_opens_doors = 1, n = 10000, seed = 0) {
	set.seed(seed)
	tibble(car = sample(doors, n, replace = TRUE),
	       choice = sample(doors, n, replace = TRUE)) %>% 
	  rowwise() %>% 
	  mutate(monty_chose = list(resample(setdiff(1:doors, c(car, choice)), monty_opens_doors)),
	         win_no_switch = car == choice,
	         win_switch = car == resample(setdiff(1:doors, unlist(c(choice, monty_chose))), 1)) %>% 
	  ungroup() %>% 
	  summarise(win_if_not_switching = sum(win_no_switch) / n() * 100,
	            win_with_switching = sum(win_switch) / n() * 100)
}
> monty() # classic values
# A tibble: 1 x 2
  win_if_not_switching win_with_switching
                 <dbl>              <dbl>
1                 33.4               66.6
> monty(10) # more doors (10), 1 opened
# A tibble: 1 x 2
  win_if_not_switching win_with_switching
                 <dbl>              <dbl>
1                 10.4               11.0
> monty(10, 3) # 10 doors, 3 opened
# A tibble: 1 x 2
  win_if_not_switching win_with_switching
                 <dbl>              <dbl>
1                 10.4               15.2

So, switch…

Categories
R

Polygons to hexagons

Hexagon tessellation using the great {geogrid} package.

The départements are the second level of administrative government in France. They have neither the same area nor the same population, and this heterogeneity poses a few challenges for a fair and accurate map representation (see the post on smoothing).

However, if we are just interested in the départements as units, we can use a regular grid for visualization. Since France is often called "the hexagon", we could even use a hexagonal tiling (a fractal map!)…

Creating the grid while conserving minimal topological relations and the general shape can be time-consuming, but thanks to {geogrid} it’s quite easy. The geogrid dev page provides nice examples. We will reuse our code from the COVID-19 animation. The resulting GIS file is provided below.

# COVID-19 deaths map, hexagons
# Metropolitan France + overseas departments
# Animation
# SPF (Santé publique France) data


# packages ----------------------------------------------------------------
library(tidyverse)
library(httr)
library(fs)
library(sf)
library(readxl)
library(janitor)
library(glue)
library(tmap)
library(grid)
library(classInt)
library(magick)
library(geogrid)


# sources -----------------------------------------------------------------

# https://www.data.gouv.fr/fr/datasets/donnees-hospitalieres-relatives-a-lepidemie-de-covid-19/
fichier_covid <- "donnees/covid.csv"
url_donnees_covid <- "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"

# https://www.insee.fr/fr/statistiques/2012713#tableau-TCRD_004_tab1_departements
fichier_pop <- "donnees/pop.xls"
url_donnees_pop <- "https://www.insee.fr/fr/statistiques/fichier/2012713/TCRD_004.xls"

# Admin Express: download manually
# https://geoservices.ign.fr/documentation/diffusion/telechargement-donnees-libres.html#admin-express
aex <- path_expand("~/Downloads/ADMIN-EXPRESS_2-2__SHP__FRA_2020-02-24/ADMIN-EXPRESS/1_DONNEES_LIVRAISON_2020-02-24")


# config ------------------------------------------------------------------

force_download <- FALSE # re-download even if the file exists and was downloaded today?


# download ------------------------------------------------------------

if (!dir_exists("donnees")) dir_create("donnees")
if (!dir_exists("resultats")) dir_create("resultats")
if (!dir_exists("resultats/animation_spf_hex")) dir_create("resultats/animation_spf_hex")

if (!file_exists(fichier_covid) |
    file_info(fichier_covid)$modification_time < Sys.Date() |
    force_download) {
  GET(url_donnees_covid,
      progress(),
      write_disk(fichier_covid, overwrite = TRUE)) %>%
    stop_for_status()
}

if (!file_exists(fichier_pop)) {
  GET(url_donnees_pop,
      progress(),
      write_disk(fichier_pop)) %>%
    stop_for_status()
}


# data --------------------------------------------------------------------

covid <- read_csv2(fichier_covid)

# pre-downloaded Admin Express
dep <- read_sf(path(aex, "ADE_2-2_SHP_LAMB93_FR/DEPARTEMENT.shp")) %>%
  clean_names() %>%
  mutate(surf_ha = st_area(geometry) / 10000) %>%  # m² to ha (1 ha = 10,000 m²)
  st_set_crs(2154)

# hexagonal grid
dep_cells_hex <- calculate_grid(shape = dep, grid_type = "hexagonal", seed = 3)
dep_hex <- assign_polygons(dep, dep_cells_hex) %>%
  st_set_crs(2154)

# For the overseas departments, duplicate and move an existing département
d971 <- dep_hex[dep_hex$insee_dep == "29", ]
d971$geometry[[1]] <- d971$geometry[[1]] + st_point(c(0, -150000))
d971$insee_dep <- "971"

d972 <- dep_hex[dep_hex$insee_dep == "29", ]
d972$geometry[[1]] <- d972$geometry[[1]] + st_point(c(0, -250000))
d972$insee_dep <- "972"

d973 <- dep_hex[dep_hex$insee_dep == "29", ]
d973$geometry[[1]] <- d973$geometry[[1]] + st_point(c(0, -350000))
d973$insee_dep <- "973"

d974 <- dep_hex[dep_hex$insee_dep == "2A", ]
d974$geometry[[1]] <- d974$geometry[[1]] + st_point(c(0, 250000))
d974$insee_dep <- "974"

d976 <- dep_hex[dep_hex$insee_dep == "2A", ]
d976$geometry[[1]] <- d976$geometry[[1]] + st_point(c(0, 350000))
d976$insee_dep <- "976"

dep_hex <- rbind(dep_hex, d971, d972, d973, d974, d976)

# population
pop <- read_xls(fichier_pop, skip = 2) %>%
  clean_names()

# separation lines between overseas departments and metropolitan France
encarts <- st_multilinestring(
  list(st_linestring(matrix(c(1100000, 6500000,
                              1100000, 6257000,
                              1240000, 6257000), byrow = TRUE, nrow = 3)),
       st_linestring(matrix(c(230000, 6692000,
                              230000, 6391000), byrow = TRUE, nrow = 2)))) %>%
  st_sfc() %>%
  st_sf(id = 1, geometry = .) %>%
  st_set_crs(2154)

# processing --------------------------------------------------------------

# joining the data
creer_df <- function(territoire, date = NULL) {
  territoire %>%
    left_join(pop, by = c("insee_dep" = "x1")) %>%
    left_join(
      covid %>%
        filter(jour == if_else(is.null(date), max(jour), date),
               sexe == 0) %>%
        rename(deces = dc,
               reanim = rea,
               hospit = hosp),
      by = c("insee_dep" = "dep")) %>%
    mutate(incidence = deces / x2020_p * 100000)
}

incidence <- creer_df(dep_hex)

set.seed(1234)
classes <- classIntervals(incidence$incidence, n = 6, style = "kmeans", dataPrecision = 0)$brks

# map ---------------------------------------------------------------------
# cumulative deaths on the latest available day

carte <- tm_layout(title = glue("COVID-19\nFrance\n{max(covid$jour)}"),
                   legend.position = c("left", "bottom"),
                   frame = FALSE) +
  tm_shape(incidence) +
  tm_polygons(col = "incidence", title = "décés\ncumulés pour\n100 000 hab.",
              breaks = classes,
              palette = "viridis",
              legend.reverse = TRUE,
              legend.format = list(text.separator = "à moins de",
                                   digits = 0)) +
  tm_text("insee_dep", size = .8) +
  tm_shape(encarts) +
  tm_lines(lty = 3) +
  tm_credits(glue("http://r.iresmi.net/
                    classif. kmeans
                    données départementales Santé Publique France,
                    INSEE RP 2020, d'après IGN Adminexpress 2020"),
             position = c(.6, 0),
             size = .5)

fichier_carto <- glue("resultats/covid_hex_fr_{max(covid$jour)}.png")

tmap_save(carte, fichier_carto, width = 900, height = 900, scale = .4)


# animation ---------------------------------------------------------------

image_animation <- function(date) {
  message(glue("\n\n{date}\n==========\n"))

  m <- creer_df(dep_hex, date) %>%
    tm_shape() +
    tm_polygons(col = "incidence", title = "décés\ncumulés pour\n100 000 hab.",
                breaks = classes,
                palette = "viridis",
                legend.reverse = TRUE,
                legend.format = list(text.separator = "à moins de",
                                     digits = 0)) +
    tm_text("insee_dep", size = .8) +
    tm_shape(encarts) +
    tm_lines(lty = 3) +
    tm_layout(title = glue("COVID-19\nFrance\n{date}"),
              legend.position = c("left", "bottom"),
              frame = FALSE) +
    tm_credits(glue("http://r.iresmi.net/
                    classif. kmeans
                    données départementales Santé Publique France,
                    INSEE RP 2020, d'après IGN Adminexpress 2020"),
               position = c(.6, 0),
               size = .5)

  tmap_save(m, glue("resultats/animation_spf_hex/covid_fr_{date}.png"),
            width = 800, height = 800, scale = .4)
}

unique(covid$jour) %>%
  walk(image_animation)

animation <- glue("resultats/deces_covid19_fr_hex_spf_{max(covid$jour)}.gif")

dir_ls("resultats/animation_spf_hex") %>%
  map(image_read) %>%
  image_join() %>%
  image_animate(fps = 2, optimize = TRUE) %>%
  image_write(animation)

COVID-19 deaths

The global shape and neighbourhood relations are well rendered. Deformation is quite pronounced for the small départements around Paris, but the map remains legible.
