diff --git a/DESCRIPTION b/DESCRIPTION
index 88f1c56..e3f626e 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,13 +1,19 @@
 Package: FluxDataKit
 Title: Flux Data Kit
-Version: 0.9
+Version: 3.0
 Authors@R: c(
     person(
       family = "Hufkens",
       given = "Koen",
       email = "koen.hufkens@gmail.com",
       role = c("aut", "cre"),
-      comment = c(ORCID = "0000-0002-5070-8109"))
+      comment = c(ORCID = "0000-0002-5070-8109")),
+    person(
+      family = "Stocker",
+      given = "Benjamin",
+      email = "benjamin.stocker@gmail.com",
+      comment = c(ORCID = "0000-0003-2697-9096"),
+      role = c("ctb"))
   )
 Description: A processing workflow for aggregated flux and remote sensing data.
  Returns both Land Surface Model and CSV based harmonized and gap filled data.
@@ -29,8 +35,7 @@ Imports:
     lubridate,
     recipes,
     readr,
-    here,
-    cowplot
+    here
 Suggests:
     knitr,
     rmarkdown,
diff --git a/R/fdk_get_sequence.R b/R/fdk_get_sequence.R
index 3ba8e9c..6356f17 100644
--- a/R/fdk_get_sequence.R
+++ b/R/fdk_get_sequence.R
@@ -28,45 +28,36 @@ fdk_get_sequence <- function(
   df <- df |>
     mutate(good_gpp = ifelse(NEE_VUT_REF_QC > qc_threshold, TRUE, FALSE),
-           good_le = ifelse(LE_F_MDS_QC > qc_threshold, TRUE, FALSE))
-
-  # determine sequences of consecutive TRUE and merge if gap between them is short
-  instances_merged <- get_consecutive(
-    df$good_gpp,
-    merge_threshold = leng_threshold,
-    do_merge = TRUE
-  )
-
-  df_sequences_merged <- tibble(
-    start = lubridate::as_date(df$TIMESTAMP[instances_merged$idx_start]),
-    end = lubridate::as_date(df$TIMESTAMP[instances_merged$idx_start + instances_merged$len - 1])
-  )
-
-  # determine longest sequence of good quality data
-  longest_sequence <- instances_merged |>
-    filter(len == max(instances_merged$len))
-
-  out <- tibble(
-    sitename = site,
-    start = lubridate::as_date(df$TIMESTAMP[longest_sequence$idx_start]),
-    end = lubridate::as_date(df$TIMESTAMP[longest_sequence$idx_start + longest_sequence$len - 1])) |>
-
-    # truncate to entire years (1. Jan - 31. Dec)
-    mutate(
-      year_start_fullyearsequence = ifelse(
-        lubridate::yday(start) == 1,
-        lubridate::year(start),
-        lubridate::year(start) + 1),
-      year_end_fullyearsequence = ifelse(
-        lubridate::yday(end) >= 365,
-        lubridate::year(end),
-        lubridate::year(end) - 1
-      )) |>
-    mutate(
-      nyears = year_end_fullyearsequence - year_start_fullyearsequence + 1
+           good_le = ifelse(LE_F_MDS_QC > qc_threshold, TRUE, FALSE),
+           good_lecorr = ifelse(LE_F_MDS_QC > qc_threshold & !is.na(LE_CORR), TRUE, FALSE)
+    )
+
+  out <- get_sequence_byvar(site, df, df$good_gpp, leng_threshold, TRUE) |>
+    rename(start_gpp = start,
+           end_gpp = end,
+           year_start_gpp = year_start,
+           year_end_gpp = year_end,
+           nyears_gpp = nyears,
+           drop_gpp = drop) |>
+    left_join(
+      get_sequence_byvar(site, df, df$good_le, leng_threshold, TRUE) |>
+        rename(start_le = start,
+               end_le = end,
+               year_start_le = year_start,
+               year_end_le = year_end,
+               nyears_le = nyears,
+               drop_le = drop),
+      by = join_by(sitename)
     ) |>
-    mutate(
-      drop = ifelse(nyears < 1, TRUE, FALSE)
+    left_join(
+      get_sequence_byvar(site, df, df$good_lecorr, leng_threshold, TRUE) |>
+        rename(start_lecorr = start,
+               end_lecorr = end,
+               year_start_lecorr = year_start,
+               year_end_lecorr = year_end,
+               nyears_lecorr = nyears,
+               drop_lecorr = drop),
+      by = join_by(sitename)
     )
 
   if (do_plot){
@@ -128,8 +119,8 @@ fdk_get_sequence <- function(
       ggplot2::geom_rect(
         data = out,
         ggplot2::aes(
-          xmin = lubridate::ymd(paste0(year_start_fullyearsequence, "-01-01")),
-          xmax = lubridate::ymd(paste0(year_end_fullyearsequence, "-12-31")),
+          xmin = lubridate::ymd(paste0(year_start_gpp, "-01-01")),
+          xmax = lubridate::ymd(paste0(year_end_gpp, "-12-31")),
           ymin = min(df$GPP_NT_VUT_REF, na.rm = TRUE),
           ymax = max(df$GPP_NT_VUT_REF, na.rm = TRUE)
         ),
@@ -169,6 +160,58 @@ fdk_get_sequence <- function(
   return(out)
 }
 
+get_sequence_byvar <- function(site, df, good, leng_threshold, do_merge){
+
+  if (any(good)){
+    # determine sequences of consecutive TRUE and merge if gap between them is short
+    inst_merged <- get_consecutive(
+      good,
+      merge_threshold = leng_threshold,
+      do_merge = do_merge
+    )
+
+    # determine longest sequence of good quality data
+    longest_seq <- inst_merged |>
+      filter(len == max(inst_merged$len))
+
+    # get start and end date of the longest sequence
+    out <- tibble(
+      sitename = site,
+      start = lubridate::as_date(df$TIMESTAMP[longest_seq$idx_start]),
+      end = lubridate::as_date(df$TIMESTAMP[longest_seq$idx_start + longest_seq$len - 1])) |>
+
+      # truncate to entire years (Jan 1 - Dec 31)
+      mutate(
+        year_start = ifelse(
+          lubridate::yday(start) == 1,
+          lubridate::year(start),
+          lubridate::year(start) + 1),
+        year_end = ifelse(
+          lubridate::yday(end) >= 365,
+          lubridate::year(end),
+          lubridate::year(end) - 1
+        )) |>
+      mutate(
+        nyears = year_end - year_start + 1
+      ) |>
+      mutate(
+        drop = ifelse(nyears < 1, TRUE, FALSE)
+      )
+  } else {
+    out <- tibble(
+      sitename = site,
+      start = NA,
+      end = NA,
+      year_start = NA,
+      year_end = NA,
+      nyears = 0,
+      drop = TRUE
+    )
+  }
+
+  return(out)
+}
+
 get_consecutive <- function(
     good,
     merge_threshold = 5,
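To make the refactored interface concrete, here is a minimal usage sketch (illustrative, not part of the patch). It assumes `fdk_get_sequence()` exposes the arguments used in the hunk above; the site name and threshold values are placeholders:

```r
library(dplyr)
library(FluxDataKit)

# df: daily data for one site, with TIMESTAMP, NEE_VUT_REF_QC,
# LE_F_MDS_QC and LE_CORR columns (FLUXNET naming)
seqs <- fdk_get_sequence(
  df,
  site = "CH-Lae",     # illustrative site name
  qc_threshold = 0.5,  # fraction of good underlying half-hourly data
  leng_threshold = 5,  # merge good runs separated by gaps shorter than this
  do_plot = FALSE
)

# one row per site; *_gpp, *_le and *_lecorr columns as renamed above
seqs |>
  select(sitename, year_start_gpp, year_end_gpp, nyears_gpp, drop_gpp)
```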
diff --git a/README.md b/README.md
index 22a786c..53a4408 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Fluxnet aggregation project
+# Multi-network ecosystem flux data compilation
 
 This project is the framework used to create the LEMONTREE "flux data kit", a dataset with consistent model data for use and re-use. In the interest of consistency across the community we re-use the PLUMBER-2 framework, with a few exceptions.
 
 The PLUMBER-2 framework generated consistent gap filled data for land surface modelling. We use the same methods (from the underlying FluxnetLSM package) to provide an expanded dataset covering more sites and site years.
 
@@ -8,12 +8,12 @@ The data is generated using [set workflow]() and new releases generated using th
 
 ## Ecosystem flux data sources
 
-We sourced data from openly available ecosystem flux networks or products, mainly ICOS, OneFlux processed data, the FLUXNET2015 dataset and PLUMBER-2 (which includes various data sources in its own right, see Ukkola et al. 2022). Data was sourced from these locations:
+We sourced data from openly available ecosystem flux data products:
 
-- ICOS data was provided through the ICOS carbon portal, this is a pre-release currently *not publicly available*
-- FLUXNET2015 data can be retrieved from the [FLUXNET data portal](https://fluxnet.org/data/fluxnet2015-dataset/)
-- OneFlux data can be retrieved from the [Ameriflux data portal](https://ameriflux.lbl.gov/data/download-data/)
-- PLUMBER data can be downloaded using [an included script](https://github.com/geco-bern/FluxDataKit/blob/main/data-raw/00_download_plumber_data.R)
+- PLUMBER-2: https://dx.doi.org/10.25914/5fdb0902607e1. Can be downloaded using [an included script](https://github.com/geco-bern/FluxDataKit/blob/main/data-raw/00_download_plumber_data.R).
+- The latest Ameriflux release, downloaded on 14 Oct 2023 from https://ameriflux.lbl.gov/.
+- ICOS Drought2018 release from https://doi.org/10.18160/YVR0-4898.
+- ICOS WarmWinter2020 release from https://doi.org/10.18160/2G60-ZHAK.
 - MODIS LAI/FPAR data is downloaded by an included script
 
 Data should be structured in the following directory structure and referenced
@@ -22,21 +22,17 @@ to as such in the data generation workflow:
 
 ```
 data/
 ├─ modis/
+├─ cloud_cover/
 ├─ flux_data/
-   ├─ fluxnet2015/
-   ├─ icos/
-   ├─ oneflux/
+   ├─ plumber/
+   ├─ icos_warmwinter2020/
+   ├─ icos_drought2018/
    ├─ ameriflux/
 ```
 
 ## Ecosystem flux data selection
 
-Given the various datasets, and at times overlap between the datasets a priority in processing is given to more recent (hopefully) and more complete datasets. In order of processing this means that OneFlux has priority over FLUXNET2015, and Plumber2. ICOS data has priority over FLUXNET2015 for European sites. Overall, Plumber2 mostly fills in the remaining sites in Asia and Australia. The final picking order is thus:
-
-- ICOS
-- OneFlux
-- FLUXNET2015
-- PLUMBER-2
+The flux data source (PLUMBER-2, Ameriflux, ICOS WarmWinter2020, or ICOS Drought2018) is determined for each site based on which source provides the longest data time series. Site meta information is drawn from multiple sources to maximise the available information. This is done in scripts `data-raw/01_collect_meta-data.R` and `data-raw/02_compile_final_site_list.R`.
 
 ## Data products
@@ -46,21 +42,25 @@ We deliver gap filled ecosystem flux data in line with the PLUMBER dataset. We r
 
 #### Exceptions and processing differences
 
-Contrary to the original PLUMBER data we report both data for a closed energy balance, and the raw data inputs (on request of some data users). Furthermore, we report both MODIS based leaf area index (LAI) and fraction of absorbed photosynthetic active radiation (FAPAR). Processing of the MODIS data was also altered and now follows a workflow similar to the one integrated in the {phenocamr} package. Data is smoothed using a LOESS based curve fitting with a BIC optimized smoothing kernel, instead of multiple cubic splines.
+Contrary to the original PLUMBER data, we report both data for a closed energy balance and the raw data inputs (at the request of some data users). Furthermore, we report both MODIS-based leaf area index (LAI) and the fraction of absorbed photosynthetically active radiation (FPAR). Processing of the MODIS data was also altered and now follows a workflow similar to the one integrated in the {phenocamr} package: data are smoothed using LOESS-based curve fitting with a BIC-optimized smoothing kernel, instead of multiple cubic splines.
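The BIC-guided LOESS smoothing mentioned above can be pictured with base R alone. The following is a sketch of the idea rather than the packaged implementation; the `dates` and `lai` inputs are hypothetical names:

```r
# Fit LOESS over a grid of spans and keep the fit that minimises the BIC.
smooth_bic_loess <- function(dates, lai) {
  x <- as.numeric(dates)
  spans <- seq(0.1, 0.9, by = 0.05)
  bic <- sapply(spans, function(s) {
    fit <- loess(lai ~ x, span = s)
    n <- length(fit$residuals)
    # BIC = n * log(residual variance) + effective no. of parameters * log(n)
    n * log(mean(fit$residuals^2)) + fit$trace.hat * log(n)
  })
  # smoothed series at the observation dates
  predict(loess(lai ~ x, span = spans[which.min(bic)]))
}
```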
 
 ### Half-hourly and daily FLUXNET data output (CSV)
 
-To provide easily readable data as requested by some data users we convert the netCDF data to a human-readable CSV file adhering to FLUXNET column- and file-naming conventions. These half-hourly files are further downsampled to a daily time step for modelling efforts which require daily data. The daily data should be easily merged on a day by day basis with remote sensing data as provided by the FluxnetEO data product (Walther et al. 2022).
+To provide easily readable data as requested by some data users, we convert the NetCDF data to human-readable CSV files adhering to FLUXNET column- and file-naming conventions. These half-hourly files are further downsampled to a daily time step for modelling efforts which require daily data. The daily data can easily be merged on a day-by-day basis with remote sensing data as provided by the FluxnetEO data product (Walther et al. 2022).
 
 > Downsampled daily data is an aggregation of the half-hourly data and not, as would be the case when downloading daily data from an ecosystem flux processing chain, a completely separate product. Some discrepancies therefore exist between the downsampled data and the equivalent daily ecosystem flux product.
 
-### p-model drivers (structured R data)
+### rsofun drivers (structured R data)
+
+A final data product derived from the initial gap-filled LSM data is the set of driver data for the [`rsofun`](https://github.com/geco-bern/rsofun) package. In the current setup, *in-situ* measured model forcing data is combined with GPP and LE values (including their quality-control information) as target data for model calibration.
+
+### Additional data cleaning
 
-A final data product derived from the initial gap-filled LSM data are p-model driver data for the [`rsofun`](https://github.com/geco-bern/rsofun) package. In the current setup *in-situ* environmental forcing will be combined with GPP values as target data for model calibration.
+Information on the longest sequence of full years (365 days) of good-quality gap-filled daily GPP, LE, and LE_CORR data for each site is provided by the package data object `fdk_site_fullyearsequence`, created by `analysis/03_screen_rsofun_data.R`. It records the start and end dates, and the full calendar years, of the available sequence per site.
 
 ### Ancillary remote sensing data
 
-For machine learning or other modelling purposes we provide ancillary MODIS based remote sensing data as described in the FluxnetEO dataset. We refer to the original publication and our [FluxnetEO](https://bg.copernicus.org/articles/19/2805/2022/) package for easy reading and processing of the data.
+For machine learning or other modelling purposes, we provide ancillary MODIS-based remote sensing data as described in the FluxnetEO dataset. We refer to the original publication and our [FluxnetEO](https://bg.copernicus.org/articles/19/2805/2022/) package for easy reading and processing of the data.
 
 ## Data and code availability
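The screening table described under "Additional data cleaning" can be used directly to subset sites. A small sketch, assuming the `*_gpp` column naming introduced by `fdk_get_sequence()` above; the three-year cutoff is arbitrary:

```r
library(dplyr)
library(FluxDataKit)

# sites with at least three full calendar years of good-quality daily GPP
usable_sites <- fdk_site_fullyearsequence |>
  filter(!drop_gpp, nyears_gpp >= 3) |>
  select(sitename, year_start_gpp, year_end_gpp, nyears_gpp)
```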
diff --git a/analysis/03_screen_rsofun_data.R b/analysis/03_screen_rsofun_data.R
index 26d33f3..f1acf11 100644
--- a/analysis/03_screen_rsofun_data.R
+++ b/analysis/03_screen_rsofun_data.R
@@ -2,7 +2,7 @@
 library(tidyverse)
 library(FluxDataKit)
 
-path <- "~/data/FluxDataKit/v3" # "/data/scratch/beta-v4"
+path <- "~/data/FluxDataKit/v3"
 
 sites <- FluxDataKit::fdk_site_info |>
   filter(!(sitename %in% c("MX-Tes", "US-KS3")))
diff --git a/analysis/04_create_zenodo_upload.R b/analysis/04_create_zenodo_upload.R
index d12b6f1..c03f7d4 100644
--- a/analysis/04_create_zenodo_upload.R
+++ b/analysis/04_create_zenodo_upload.R
@@ -11,33 +11,33 @@
 # the Zenodo repository:
 # https://zenodo.org/record/7258291
 
-input_path <- "/data/scratch/beta-v4/"
-tmp_path <- "/data/scratch/upload"
+input_path <- "~/data/FluxDataKit/v3/"
+tmp_path <- "~/data/FluxDataKit/v3/zenodo_upload/"
 
 #---- purge old data -----
 
-# remove temporary path
-system(sprintf("rm -rf %s", tmp_path))
-
-# recreate temporary path
-dir.create(tmp_path)
-
-#---- copy new data over ----
-system(
-  sprintf(
-    "cp -R %s/lsm %s/lsm",
-    input_path,
-    tmp_path
-  )
-)
-
-system(
-  sprintf(
-    "cp -R %s/fluxnet %s/fluxnet",
-    input_path,
-    tmp_path
-  )
-)
+# # remove temporary path
+# system(sprintf("rm -rf %s", tmp_path))
+#
+# # recreate temporary path
+# dir.create(tmp_path)
+#
+# #---- copy new data over ----
+# system(
+#   sprintf(
+#     "cp -R %s/lsm %s/lsm",
+#     input_path,
+#     tmp_path
+#   )
+# )
+#
+# system(
+#   sprintf(
+#     "cp -R %s/fluxnet %s/fluxnet",
+#     input_path,
+#     tmp_path
+#   )
+# )
 
 #---- rename all files in place ----
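Should the staging step ever be reactivated, a portable R-native equivalent of the commented-out shell calls might look like the sketch below (same `input_path`/`tmp_path` variables as in the script):

```r
# recreate the staging directory
unlink(tmp_path, recursive = TRUE)
dir.create(tmp_path, recursive = TRUE)

# file.copy() with recursive = TRUE mirrors `cp -R` without shelling out
file.copy(
  from = file.path(input_path, c("lsm", "fluxnet")),
  to = tmp_path,
  recursive = TRUE
)
```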
diff --git a/data-raw/README.md b/data-raw/README.md
index e3ba212..8dbcc6f 100644
--- a/data-raw/README.md
+++ b/data-raw/README.md
@@ -21,10 +21,10 @@ for them to function.
 
 Data was sourced from different locations:
 
-- ICOS data was provided through the ICOS carbon portal, this is a pre-release currently *not publicly available*
-- FLUXNET2015 data can be retrieved from the [FLUXNET data porta](https://fluxnet.org/data/fluxnet2015-dataset/)
-- OneFlux data can be retrieved from the [Ameriflux data portal](https://ameriflux.lbl.gov/data/download-data/)
-- PLUMBER data can be downloaded using [an included script](https://github.com/geco-bern/FluxDataKit/blob/main/data-raw/00_download_plumber_data.R)
+- PLUMBER-2: https://dx.doi.org/10.25914/5fdb0902607e1. Can be downloaded using [an included script](https://github.com/geco-bern/FluxDataKit/blob/main/data-raw/00_download_plumber_data.R).
+- The latest Ameriflux release, downloaded on 14 Oct 2023 from https://ameriflux.lbl.gov/.
+- ICOS Drought2018 release from https://doi.org/10.18160/YVR0-4898.
+- ICOS WarmWinter2020 release from https://doi.org/10.18160/2G60-ZHAK.
 - MODIS LAI/FPAR data is downloaded by an included script
 
 ## Data structure
diff --git a/data/fdk_site_fullyearsequence.rda b/data/fdk_site_fullyearsequence.rda
index fb356cb..b6350e9 100644
Binary files a/data/fdk_site_fullyearsequence.rda and b/data/fdk_site_fullyearsequence.rda differ
diff --git a/vignettes/01_setup.Rmd b/vignettes/01_setup.Rmd
index f3a2ff5..4fe9e5f 100644
--- a/vignettes/01_setup.Rmd
+++ b/vignettes/01_setup.Rmd
@@ -1,6 +1,6 @@
 ---
 title: "Setup and data provenance"
-author: "Koen Hufkens"
+author: "Koen Hufkens and Benjamin Stocker"
 output: rmarkdown::html_vignette
 vignette: >
   %\VignetteIndexEntry{Setup and data provenance}
@@ -8,7 +8,7 @@ vignette: >
   %\usepackage[utf8]{inputenc}
 ---
 
-# Setup
+## Setup
 
 Install all critical packages required in gathering and processing the data by
 installing the `FluxDataKit` package.
@@ -29,7 +29,7 @@
 only available in the source code of the project (see data provenance below).
 With all tools installed we need to download the required input data for
 aggregation.
 
-# data provenance (downloads)
+## Data provenance (downloads)
 
 A list of numbered scripts is provided in the `data-raw` directory of the
 cloned project, which govern the downloading and compiling of intermediate
 data.
@@ -50,9 +50,9 @@ When you want to alter the storage location you may use soft links.
 
 ## Meta data
 
 Meta-data is compiled on a site by site basis using the
-`01_collect_meta-data.R` script. This will compile all easily available
+`01_collect_meta-data.R` script. This compiles all easily available
 meta-data through either API calls or scraping the data downloaded in the
-previous step (setup / data collection). In the current scirpt paths are set
+previous step (setup / data collection). In the current script, paths are set
 for data stored in `data-raw`. However, you are free to move the data anywhere
@@ -60,12 +60,11 @@ you like as long as you adjust the paths in the meta-data script.
 
 ```r
 source("data-raw/01_collect_meta-data.R")
 ```
 
-Given the various datasets, and at times overlap between the datasets a priority
-in processing is given to more recent (hopefully) and more complete datasets. In
-order of processing this means that OneFlux has priority over FLUXNET2015, and
-Plumber2. ICOS data has priority over FLUXNET2015 for European sites. Overall,
-Plumber2 mostly fills in the remaining sites in Asia and Australia. The final
-picking order is specified and processed by calling the below script.
+The flux data source (PLUMBER-2, Ameriflux, ICOS WarmWinter2020, or ICOS
+Drought2018) is determined for each site based on which source provides the
+longest data time series. Site meta information is drawn from multiple sources
+to maximise the available information. This is done in scripts
+`data-raw/01_collect_meta-data.R` and `data-raw/02_compile_final_site_list.R`.
 
 ```r
 source("data-raw/02_compile_final_site_list.R")
@@ -74,45 +73,18 @@ source("data-raw/02_compile_final_site_list.R")
 ```
 
 ## Flux data
 
 All ecosystem flux sources should be downloaded at half-hourly (HH) rates. Data
-sources and final paths are listed below. Top level paths for flux data are
+sources and final paths are listed below. Top-level paths for flux data are
 considered to be sufficient for further processing. Estimated data sizes are
 provided to indicate network load, although for most these downloads should be
 easily manageable from a download and storage perspective.
 
-Below you find a summary table of data volumes, nr. of sites and the local storage
-paths for all flux data products considered.
-
-| product | data volume | nr. sites | data path |
-|----------|:-------------:|:------:|:------|
-| FLUXNET2015 | ~35GB | 166 | `data-raw/flux_data/fluxnet2015/`
-| ICOS | ~12GB | 67 | `data-raw/flux_data/icos/`
-| Ameriflux OneFlux | ~12GB | 74 | `data-raw/flux_data/oneflux/`
-| Plumber2 | ~4GB | 112 | `data-raw/flux_data/plumber_fluxnet/`
-
-### FLUXNET2015
-
-FLUXNET2015 data is downloaded from the dedicated
-[download page](https://fluxnet.org/data/fluxnet2015-dataset/). A login is
-required to access the data. We only considered the legacy **FULLSET** data,
-which is covered by a CC-BY 4.0 license. This limits the site count. Expanding
-this list is possible, but only with permission from the PI sharing the data.
-We refer to the [data policy](https://fluxnet.org/data/data-policy/) for more
-details.
-
-### ICOS
-
-As of writing ICOS data was provided as a pre-release to our group and is therefore not
-yet available for a wider public. However, this data should be released shortly through
-the [ICOS carbon portal](https://www.icos-cp.eu/data-services/about-data-portal).
-
-### Ameriflux OneFlux
-
-A limited set of sites has been reprocessed using the Ameriflux OneFlux processing chain.
-The data should be downloaded manually from the [website data portal](https://ameriflux.lbl.gov/login/?redirect_to=/data/download-data/). A
-login is required.
+We sourced data from openly available ecosystem flux data products:
 
 ### Plumber2
 
+Reference: https://dx.doi.org/10.25914/5fdb0902607e1.
+
 "PLUMBER2 is a model inter-comparison project for land surface models. Multiple
 leading land surface and ecosystem models are evaluated for water and carbon
 fluxes at 170 flux tower sites, spanning multiple biomes and climate zones
 globally." The full description of the dataset can be found in the publication
 by Ukkola et al. (2022); the data combines the FLUXNET2015, La Thuile and
 OzFlux collections.
 
 The downloading and conversion is facilitated using a script
-`00_download_convert_flux_data.R` included in the `data-raw` directory.
+`00_download_plumber_data.R` included in the `data-raw` directory.
 
-## Gridded products
+### Ameriflux OneFlux
 
-Other gridded data products are required to complement the flux data for modelling
-purposes. Products required, data volumes and storage paths are listed below.
-Detailed links to original publications and data are provided below.
+This is the latest Ameriflux release, downloaded on 14 Oct 2023 from https://ameriflux.lbl.gov/.
+The data should be downloaded manually from the [website data portal](https://ameriflux.lbl.gov/login/?redirect_to=/data/download-data/). A
+login is required.
 
-| product | data volume | data path |
-|-----------|:---------------------:|:-----------|
-| rooting zone water storage | ~80MB | `data-raw/ancillary_data/rooting_zone_water_storage/`
-| Koeppen-Geiger | ~22MB | `data-raw/ancillary_data/koeppen_geiger/`
-| field capacity | ~4MB | `data-raw/ancillary_data/field_capacity/`
+### ICOS Drought2018
 
-### Rooting zone water storage capacity
+Reference: https://doi.org/10.18160/YVR0-4898.
 
-Global rooting zone water storage capacity can be
-[downloaded here](https://zenodo.org/record/5515246) (Stocket et al. 2020).
+### ICOS WarmWinter2020
 
-### Koeppen-Geiger (included in repo)
+Reference: https://doi.org/10.18160/2G60-ZHAK.
 
-Koeppen-Geiger climate classifications are downloaded from the recent
-work by [Beck et al. 2018](https://www.nature.com/articles/sdata2018214). Data
Data -can be downloaded from the [project website](http://www.gloh2o.org/koppen/) but -is also included in the repository due to its small size. +## Additional site meta info sources -### Field capacity (included in repo) +Additional data sources are used for compiling site meta information in script `data-raw/02_compile_final_site_list.R`. -Global Gridded Surfaces of Selected Soil Characteristics on the [ORNL DAAC]( -https://daac.ornl.gov/SOILS/guides/IGBP-SurfaceProducts.html) provides field -capacity values. Data needs to be manually downloaded from the [visualization -tools](https://webmap.ornl.gov/ogc/dataset.jsp?ds_id=569) for easy export into -a geotiff format. A ORNL login is required! +- Falge et al.: https://doi.org/10.3334/ORNLDAAC/1530 +- ICOS site list, downloaded from http://www.europe-fluxdata.eu/home/sites-list. Contained in this repo (`data-raw/meta_data/sites_list_icos.csv`) +- Koeppen-Geiger climate zone classification. File (22 MB) contained in this repo (`data-raw/ancillary_data/koeppen_geiger/`). +- Root zone water storage capacity based on [Stocker et al., 2023](https://www.nature.com/articles/s41561-023-01125-2). File not contained in this repo, but available on [Zenodo](https://doi.org/10.5281/zenodo.5515246). +- ETOPO1 digital elevation model (doi:10.7289/V5C8276M). File not contained in this repo. Obtainable from https://www.ngdc.noaa.gov/mgg/global/. ## Remote sensing data @@ -167,10 +131,8 @@ With the site list generated or manually populated you can download the data stored in the `data-raw/modis/` directory as: ```r -sites <- readRDS("data/flux_data_kit_site-info.rds") - fdk_download_modis( - df = sites, + df = fdk_site_info, path = "data-raw/modis/" ) ``` @@ -182,7 +144,7 @@ For batch processing we refer tot the `04_download_modis_data.R` script in the ### FluxnetEO -To supplement the land surface model driver data, and the derived p-model (rsofun) +To supplement the land surface model driver data, and the derived rsofun input, we used the `FluxnetEO` product (Walther et al. 2022) and similarly named [package](https://geco-bern.github.io/FluxnetEO/). diff --git a/vignettes/03_data_generation.Rmd b/vignettes/03_data_generation.Rmd index 8afc8cc..2d1d9f9 100644 --- a/vignettes/03_data_generation.Rmd +++ b/vignettes/03_data_generation.Rmd @@ -151,7 +151,7 @@ fdk_downsample_fluxnet( ) ``` -## p-model (rsofun) and Machine Learning formatting +## rsofun formatting In addition, MODIS data can be merged from the FluxnetEO dataset using the R package [with the same name](https://github.com/geco-bern/FluxnetEO).