11  Merging the files

As in the earlier merging chapter, we merge the different datasets to produce the data table used in the local projections; this version uses quarterly rather than monthly data.

library(tidyverse)
── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
✔ dplyr     1.1.4     ✔ readr     2.1.4
✔ forcats   1.0.0     ✔ stringr   1.5.0
✔ ggplot2   3.4.4     ✔ tibble    3.2.1
✔ lubridate 1.9.3     ✔ tidyr     1.3.0
✔ purrr     1.0.2     
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag()    masks stats::lag()
ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(cli)

11.1 Load Intermediate Files

The (quarterly) weather data (Chapter 1) can be loaded:

load("../data/output/weather/weather_regions_df_quarter.rda")

The agricultural data (Chapter 2):

load("../data/output/minagri/dataset_agri_2001_2015.rda")

The macroeconomic data and commodity prices (Chapter 3):

load("../data/output/macro/df_macro.rda")
load("../data/output/macro/df_int_prices.rda")

The share of natural regions and the El Niño–Southern Oscillation (Chapter 4):

load("../data/output/natural_region_dep.rda")
load("../data/output/weather/ONI_temp.rda")

11.2 Merge the Files

We add ENSO data to the weather dataset:

Weather <- weather_regions_df |> 
  # Add ENSO data
  left_join(
    ONI_temp |> 
      mutate(quarter = quarter(date)) |> 
      mutate(
        year = as.numeric(Year), 
        quarter = as.numeric(quarter)) |> 
      group_by(year, quarter) |> 
      summarise(ONI = mean(ONI)), 
    by = c(
      "year" = "year",
      "quarter" = "quarter"
    )
  ) |>
  # Deviations from the quarter-specific regional averages
  group_by(IDDPTO, quarter) |> 
  mutate(
    temp_min_dev_ENSO   = temp_min - mean(temp_min),
    temp_max_dev_ENSO   = temp_max - mean(temp_max),
    temp_mean_dev_ENSO  = temp_mean - mean(temp_mean),
    precip_sum_dev_ENSO = precip_sum - mean(precip_sum)
  ) |> 
  ungroup() |> 
  labelled::set_variable_labels(
    temp_min_dev_ENSO   = "Deviation of Min. Temperature from ENSO Normals",
    temp_max_dev_ENSO   = "Deviation of Max. Temperature from ENSO Normals",
    temp_mean_dev_ENSO  = "Deviation of Mean Temperature from ENSO Normals",
    precip_sum_dev_ENSO = "Deviation of Total Rainfall from ENSO Normals"
  )

For international prices, we aggregate the monthly series to quarterly averages and compute, for each commodity, the year-on-year inflation rate (the lag is taken within product and calendar-quarter groups, i.e., relative to the same quarter of the previous year):

int_prices <- int_prices |> 
  mutate(quarter = quarter(date), 
         year    = year(date)) |> 
  group_by(product, quarter, year) |> 
  mutate(price_int = mean(price_int)) |> 
  ungroup() |> 
  select(-date, -price_int_inf) |> 
  unique() |> 
  group_by(product, quarter) |> 
  arrange(year, quarter) |> 
  # lag() within (product, quarter) groups: same quarter, previous year
  mutate(int_price_inf = (price_int / lag(price_int) - 1) * 100) |> 
  ungroup() |> 
  arrange(product, year, quarter) 

Let us merge all these datasets into a single one:

data_total <- 
  data_total |> 
  # Add macroeconomic data
  left_join(
    df_macro |> rename(gdp = y),
    by = "date"
  ) |> 
  mutate(quarter = quarter(date)) |> 
  dplyr::select(
    product_eng, region, region_id, product, quarter, year, Value_prod,
    rer_hp, r_hp, pi, ind_prod) |> 
  # Aggregate to the quarterly frequency
  group_by(region, product, quarter, year) |> 
  mutate(
    Value_prod     = sum(Value_prod),
    rer_hp         = mean(rer_hp),
    r_hp           = mean(r_hp),
    pi             = mean(pi),
    ind_prod       = mean(ind_prod)
  ) |> 
  ungroup() |> 
  unique() |> 
  # Add commodity prices data
  left_join(
    int_prices,
    by = c(
      "product", "product_eng", "year", "quarter")
  ) |> 
  group_by(product, region) |> 
  mutate(
    # Keep the cyclical component (hpfilter() comes from the {mFilter} package)
    int_price_inf = mFilter::hpfilter(
      int_price_inf, freq = 1600, type = "lambda"
    )[["cycle"]]
  ) |> 
  # Add weather data and ENSO 
  left_join(
    Weather |> 
      dplyr::select(-IDDPTO),
    by = c(
      "year" = "year",
      "quarter" = "quarter",
      "region" = "DEPARTAMEN"
    )
  ) 
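
The cyclical component of commodity price inflation is extracted above with the Hodrick–Prescott filter. As a standalone sketch of what mFilter::hpfilter() returns, applied to a hypothetical random-walk series (lambda = 1600 being the usual smoothing value for quarterly data):

library(mFilter)
set.seed(123)
x  <- cumsum(rnorm(40))                         # hypothetical quarterly series
hp <- hpfilter(x, freq = 1600, type = "lambda")
head(hp[["cycle"]])                             # the component kept above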

Here are the first rows of the data_total tibble:

data_total
# A tibble: 7,320 × 27
# Groups:   product, region [122]
   product_eng  region region_id product quarter  year Value_prod  rer_hp   r_hp
   <chr>        <chr>      <int> <chr>     <dbl> <dbl>      <dbl>   <dbl>  <dbl>
 1 Amylaceous … AMAZO…         1 MAÍZ A…       1  2001      1242.  1.71    1.61 
 2 Amylaceous … AMAZO…         1 MAÍZ A…       2  2001      2038   1.33    5.58 
 3 Amylaceous … AMAZO…         1 MAÍZ A…       3  2001      6092. -1.24   -0.599
 4 Amylaceous … AMAZO…         1 MAÍZ A…       4  2001       738  -2.13   -2.64 
 5 Amylaceous … AMAZO…         1 MAÍZ A…       1  2002      1217  -0.916  -2.98 
 6 Amylaceous … AMAZO…         1 MAÍZ A…       2  2002      3263  -1.41   -2.36 
 7 Amylaceous … AMAZO…         1 MAÍZ A…       3  2002      6762   1.36   -0.570
 8 Amylaceous … AMAZO…         1 MAÍZ A…       4  2002       251   0.0995  0.268
 9 Amylaceous … AMAZO…         1 MAÍZ A…       1  2003       524  -0.762   0.324
10 Amylaceous … AMAZO…         1 MAÍZ A…       2  2003      1394   0.0216  0.617
# ℹ 7,310 more rows
# ℹ 18 more variables: pi <dbl>, ind_prod <dbl>, price_int <dbl>,
#   int_price_inf <dbl>, temp_min <dbl>, temp_max <dbl>, temp_mean <dbl>,
#   precip_sum <dbl>, perc_gamma_precip <dbl>, temp_min_dev <dbl>,
#   temp_max_dev <dbl>, temp_mean_dev <dbl>, precip_sum_dev <dbl>, ONI <dbl>,
#   temp_min_dev_ENSO <dbl>, temp_max_dev_ENSO <dbl>, temp_mean_dev_ENSO <dbl>,
#   precip_sum_dev_ENSO <dbl>

11.3 Dataset for the Local Projections

Now, let us create the dataset specifically used to estimate the models.

Let us make sure that the region data are encoded as a factor.

data_total <- 
  data_total |> 
  mutate(region_id = factor(region_id))

The crops we focus on:

crops <- c("Rice", "Dent corn", "Potato", "Cassava")

The number of observations with a null or negative production value in each region, for each crop:

data_total |> 
  group_by(product_eng, region_id) |> 
  summarise(n = sum(Value_prod <= 0)) |> 
  arrange(desc(n))
`summarise()` has grouped output by 'product_eng'. You can override using the
`.groups` argument.
# A tibble: 122 × 3
# Groups:   product_eng [6]
   product_eng     region_id     n
   <chr>           <fct>     <int>
 1 Rice            14           53
 2 Cassava         17           46
 3 Wheat           18           45
 4 Wheat           20           39
 5 Rice            20           33
 6 Amylaceous corn 18           31
 7 Amylaceous corn 20           31
 8 Wheat           11           29
 9 Amylaceous corn 11           28
10 Dent corn       20           27
# ℹ 112 more rows

11.3.1 Detrending of the agricultural production

This section outlines a two-step procedure for detrending the agricultural production data at the regional level, for a specific crop and calendar quarter. The procedure involves handling missing values and then performing the detrending itself.

  • Step 1: Handling Missing Values

    In the first step, we address missing values by linear interpolation. This approach helps us estimate the missing values by considering the neighboring data points.

    • Step 1.1: Imputing missing values with linear interpolation.

The missing values are replaced by linear interpolation. However, if there are more than three consecutive missing values, they are not interpolated. Instead, the series for the specific crop in the given region is split at the locations of the missing values. The split with the highest number of consecutive non-missing values is retained, while the other splits are discarded.

    • Step 1.2: Dropping Series with Remaining Missing Values

After imputing missing values by linear interpolation, we check whether any missing values remain in the dataset. If a series still contains missing values, we exclude it from further analysis. By doing so, we ensure that the subsequent detrending is performed only on reliable and complete data.

  • Step 2: Detrending the data

Once the missing values have been addressed, we proceed to the second step: detrending. Detrending removes the long-term trend (and, since it is performed separately for each calendar quarter, the seasonality) from the series, leaving behind the underlying fluctuations and patterns. We follow a three-step process.

    • Step 2.1: Demeaning

First, we normalize each data point, obtaining \(y_{c,i,m,t}^{demeaned}\), by dividing the raw data point \(y_{c,i,m,t}^{raw}\) by the average of all raw data points for the given crop \(c\), region \(i\), and calendar quarter \(m\) over the entire time period \(T_c\): \[y_{c,i,m,t}^{demeaned}=\frac{y^{raw}_{c,i,m,t}}{\frac{1}{n_{T_c}}\sum_{t=1}^{T_c}y^{raw}_{c,i,m,t}}\] Here, \(n_{T_c}\) represents the total number of data points for the given crop, region, and quarter. (Note that the implementation below divides by the quarter-specific median rather than the mean.)

    • Step 2.2: Quadratic Trend Estimation

Next, we estimate a quadratic trend using ordinary least squares (OLS) regression. We model the demeaned data points \(y_{c,i,m,t}^{demeaned}\) as a quadratic function of time \(t\): \[y^{demeaned}_{c,i,m,t}=\beta_{c,i,m} t + \gamma_{c,i,m} t^{2} + \varepsilon_{c,i,m,t}\] In this equation, \(\varepsilon_{c,i,m,t}\) represents the error term, assumed to follow a normal distribution.

    • Step 2.3: Detrending

Once we have estimated the coefficients \(\beta_{c,i,m}\) and \(\gamma_{c,i,m}\) through OLS regression, we can remove the quadratic trend from the data. This is done by calculating the detrended data \(y_{c,i,m,t}^{d}\), which simply consists of the residuals \(e_{c,i,m,t}\): \[y^{d}_{c,i,m,t} = e_{c,i,m,t}\] The resulting detrended data represent the original data with the quadratic trend component removed. (In the implementation below, the quarter average of the fitted values is added back to the residuals, so that the detrended series keeps a positive level; this also makes the optional log transformation possible.) We also add the possibility to express the results in logarithms. A toy example of these three steps is sketched right after this list.
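
To make these steps concrete, here is a minimal sketch on a single hypothetical series (one crop, one region, one calendar quarter); the values in y_raw are made up for illustration. The full per-quarter implementation is the detrend_production() function defined below.

# Hypothetical production values for one crop/region/quarter
y_raw <- c(100, 112, 125, 130, 151, 160, 170, 168, 185, 190)
t     <- seq_along(y_raw)
# Step 2.1: normalize by the series average
y_demeaned <- y_raw / mean(y_raw)
# Step 2.2: quadratic trend fitted by OLS (without an intercept, as in
# detrend_production() below)
ols_fit <- lm(y_demeaned ~ -1 + t + I(t^2))
# Step 2.3: the detrended series is the vector of residuals
y_detrended <- residuals(ols_fit)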

Let us implement this process in R. First, we need to define two functions to handle the missing values:

  • The get_index_longest_non_na() function retrieves the indices of the longest consecutive sequence without missing values from a given input vector. It helps us identify the positions of elements in that sequence.

  • The keep_values_longest_non_na() function uses the obtained indices to create a logical vector. Each element of this vector indicates whether the corresponding element in the input vector belongs to the longest consecutive sequence of non-missing values. This allows us to filter the data and retain only the values from the longest consecutive sequence without missing values.

These two functions combined help us handle missing data in the weather series and ensure that we work with the most complete sequences for each region and crop.

The first function:

#' Returns the index of the longest sequence of non NA values in a vector
#'
#' @param y vector of numerical values
#' @export
get_index_longest_non_na <- function(y) {
  split_indices <- which(is.na(y))
  nb_obs <- length(y)

  if (length(split_indices) == 0) {
    res <- seq_len(nb_obs)
  } else {
    idx_beg <- c(1, split_indices)
    if (idx_beg[length(idx_beg)] != nb_obs) {
      idx_beg <- c(idx_beg, nb_obs)
    }
    lengths <- diff(idx_beg)
    ind_max <- which.max(lengths)
    index_beginning <- idx_beg[ind_max]
    if (index_beginning != 1 | is.na(y[index_beginning])) {
      index_beginning <- index_beginning + 1
    }
    index_end <- idx_beg[ind_max] + lengths[ind_max]
    if(is.na(y[index_end])) {
      index_end <- index_end - 1
    }
    res <- seq(index_beginning, index_end)
  }
  res
}

The second one:

#' Returns a logical vector that identifies the longest sequence of non NA
#' values within the input vector
#' 
#' @param y numeric vector
keep_values_longest_non_na <- function(y) {
  ids_to_keep <- get_index_longest_non_na(y)
  ids <- seq(1, length(y))
  ids %in% ids_to_keep
}
Note: Those two functions are defined in weatherperu/R/utils.R.
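
As a quick sanity check, the two helpers can be tried on a small hypothetical vector (not part of the pipeline), together with the imputeTS::na_interpolation() call used below, in which gaps longer than three consecutive NAs are left untouched:

y <- c(NA, 5, 6, NA, NA, NA, NA, 2, 3, 4, 5, NA)
imputeTS::na_interpolation(y, maxgap = 3) # the four-NA gap is left as NA
get_index_longest_non_na(y)               # 8 9 10 11
keep_values_longest_non_na(y)             # TRUE only for positions 8 to 11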

We define a function, detrend_production(), that takes the data frame as input, as well as a crop name and a region ID. It returns the detrended production for that region, in a tibble. In the resulting tibble, the detrended values are in a column named y.

#' Detrends the production data, using OLS
#'
#' @param df data
#' @param crop_name name of the crop
#' @param region_id id of the region
#' @param in_log if TRUE, the detrended values are expressed in log
#'
#' @returns data frame with the product, the region id, the date, and the
#'   detrended value for the production. If `in_log = TRUE` and there are zeros
#'   the function returns `NULL`. Similarly, if there are two or more
#'   contiguous zeros in the production series, the function returns `NULL`.
#' @export
#' @importFrom dplyr filter arrange mutate select row_number group_by
#' @importFrom tidyr nest unnest
#' @importFrom purrr map
#' @importFrom imputeTS na_interpolation
#' @importFrom stats lm predict residuals
detrend_production <- function(df,
                               crop_name,
                               region_id,
                               in_log = FALSE) {
  # The current data
  df_current <-
    df |>
    filter(
      product_eng == !!crop_name,
      region_id == !!region_id
    ) |>
    arrange(year, quarter)

  ## Dealing with missing values ----
  # Look for negative production values
  df_current <-
    df_current |>
    mutate(
      y_new = ifelse(Value_prod < 0, NA, Value_prod)
    )

  if (any(is.na(df_current$y_new))) {

    # Replacing NAs by interpolation
    # Gaps of more than three contiguous NAs are left as NA (maxgap = 3)
    df_current <-
      df_current |>
      mutate(
        y_new = imputeTS::na_interpolation(y_new, maxgap = 3)
      )

    # Removing obs at the beginning/end if they are still missing
    df_current <-
      df_current |>
      mutate(
        row_to_keep = !(is.na(y_new) & row_number() %in% c(1:2, (n()-1):(n())))
      ) |>
      filter(row_to_keep) |>
      select(-row_to_keep)

    # Keeping the longest series of continuous non-NA values
    df_current <-
      df_current |>
      mutate(
        row_to_keep = keep_values_longest_non_na(y_new)
      ) |>
      filter(row_to_keep) |>
      select(-row_to_keep)
  }

  rle_y_new <- rle(df_current$y_new)
  check_contiguous_zeros <- rle_y_new$lengths[rle_y_new$values==0]

  if (any(check_contiguous_zeros >= 2)) {
    resul <- NULL
  } else {

    ## Detrending
    df_current <-
      df_current |>
      group_by(quarter) |>
      mutate(
        y_new_normalized = y_new / median(y_new)
      )

    if (any(is.infinite(df_current$y_new_normalized))) {
      resul <- NULL
    } else {
      resul <-
        df_current |>
        select(product_eng, region_id, year, quarter, y_new, y_new_normalized) |>
        group_by(quarter) |>
        arrange(year) |>
        mutate(t = row_number()) |>
        ungroup() |>
        nest(.by = c(product_eng, region_id, quarter)) |>
        # distinct OLS per quarter
        mutate(
          ols_fit   = map(data, ~ lm(y_new_normalized ~ -1 + t + I(t^2), data = .x)),
          resid     = map(ols_fit, residuals),
          fitted    = map(ols_fit, predict)
          # intercept = map(ols_fit, ~ coef(.x)[["(Intercept)"]])
        ) |>
        # unnest(cols = c(data, resid, intercept)) |>
        unnest(cols = c(data, resid, fitted)) |>
        group_by(quarter) |>
        mutate(
          y = resid + mean(fitted)
          # y = resid
        ) |>
        select(product_eng, region_id, year, quarter, y_new, y) |>
        ungroup() |>
        arrange(year)

      if (in_log) {
        resul <- resul |>
          mutate(y = log(y))
      }
    }
  }
  resul
}

For example, for potatoes in the region with ID 1:

detrend_production(
  df = data_total, 
  crop_name = "Potato", 
  region_id = 1
)
# A tibble: 60 × 6
   product_eng region_id  year quarter  y_new     y
   <chr>       <fct>     <dbl>   <dbl>  <dbl> <dbl>
 1 Potato      1          2001       1  9786   1.52
 2 Potato      1          2001       2 14648.  1.64
 3 Potato      1          2001       3 14670.  1.56
 4 Potato      1          2001       4 10972   1.55
 5 Potato      1          2002       1 14682.  1.67
 6 Potato      1          2002       2 15682.  1.48
 7 Potato      1          2002       3 14384   1.34
 8 Potato      1          2002       4 12348   1.44
 9 Potato      1          2003       1 12597   1.28
10 Potato      1          2003       2 17262   1.38
# ℹ 50 more rows

We can apply this function to all the crops of interest, in each region. Let us define a table containing all the possible combinations of crops and regions:

product_and_regions <- 
  data_total |> 
  filter(product_eng %in% crops) |> 
  select(product_eng, region_id) |> 
  unique()
Adding missing grouping variables: `product`, `region`

We will not transform the detrended values by applying the logarithm function:

# Should the detrended values be expressed in logs?
in_log <- FALSE

Then we apply the detrend_production() function to all these different cases, and store the results in a list named df_detrended_production:

df_detrended_production <- vector(mode = "list", length = nrow(product_and_regions))
cli_progress_bar(total = nrow(product_and_regions))
for(i in 1:nrow(product_and_regions)){
  df_detrended_production[[i]] <- detrend_production(
    df = data_total, 
    crop_name = product_and_regions$product_eng[i], 
    region_id = product_and_regions$region_id[i],
    in_log = in_log
  )
  cli_progress_update(set = i)
}
Registered S3 method overwritten by 'quantmod':
  method            from
  as.zoo.data.frame zoo 
■■■■■■■■■■■■■■■                   45% | ETA:  1s
■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■  100% | ETA:  0s
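
Equivalently, the loop can be written as a functional call with purrr (attached with the tidyverse); a sketch, without the progress bar:

df_detrended_production <- map2(
  product_and_regions$product_eng,
  product_and_regions$region_id,
  ~ detrend_production(
    df = data_total, crop_name = .x, region_id = .y, in_log = in_log
  )
)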

The elements of the list are all tibbles with the same column names. We can bind them into a single tibble.

df_detrended_production <- bind_rows(df_detrended_production)

We can have a look at the number of quarters with 0 values for the agricultural production. Recall that series in which two or more contiguous zeros were observed were discarded from the results.

df_detrended_production |> 
  group_by(product_eng, region_id) |> 
  summarise(nb_0 = sum(y_new == 0)) |> 
  arrange(desc(nb_0))
`summarise()` has grouped output by 'product_eng'. You can override using the
`.groups` argument.
# A tibble: 68 × 3
# Groups:   product_eng [4]
   product_eng region_id  nb_0
   <chr>       <fct>     <int>
 1 Cassava     4             7
 2 Cassava     10            5
 3 Dent corn   5             3
 4 Rice        18            3
 5 Dent corn   4             2
 6 Dent corn   16            2
 7 Cassava     2             1
 8 Dent corn   18            1
 9 Rice        12            1
10 Cassava     1             0
# ℹ 58 more rows

Now, let us add the other columns to the tibble that contains the detrended data:

df <- df_detrended_production |> 
  left_join(
    data_total,
    join_by(product_eng, region_id, year, quarter)
  )

Let us also impute missing values for the weather variables.

weather_variables <- 
  weather_regions_df |> 
  select(where(is.numeric)) |> 
  select(-year, -quarter) |> 
  colnames()

The current number of missing values:

df |> 
  summarise(
    across(
      .cols = !!weather_variables,
      .fns = ~ sum(is.na(.x)),
      .names = "{.col}_nb_na"
    )
  ) |> 
  unlist()
         temp_min_nb_na          temp_max_nb_na         temp_mean_nb_na 
                      0                       0                       0 
       precip_sum_nb_na perc_gamma_precip_nb_na      temp_min_dev_nb_na 
                      0                       0                       0 
     temp_max_dev_nb_na     temp_mean_dev_nb_na    precip_sum_dev_nb_na 
                      0                       0                       0 

In case of missing values, we use linear interpolation to replace them:

df <- 
  df |> 
  mutate(
    across(
      .cols = !!weather_variables,
      .fns = ~ imputeTS::na_interpolation(.x, maxgap = 3)
    )
  )

The number of remaining missing values:

df |> 
  summarise(
    across(
      .cols = !!weather_variables,
      .fns = ~ sum(is.na(.x)),
      .names = "{.col}_nb_na"
    )
  ) |> 
  unlist()
         temp_min_nb_na          temp_max_nb_na         temp_mean_nb_na 
                      0                       0                       0 
       precip_sum_nb_na perc_gamma_precip_nb_na      temp_min_dev_nb_na 
                      0                       0                       0 
     temp_max_dev_nb_na     temp_mean_dev_nb_na    precip_sum_dev_nb_na 
                      0                       0                       0 

We add labels to the new columns:

df <- 
  df |> 
  labelled::set_variable_labels(
    y_new = "Quarterly Agricultural Production (tons)",
    y = "Quarterly Agricultural Production (pct. deviation from quarterly trend)"
  )

11.3.2 Saving the file

The dataset used to estimate the impact of weather shocks on agricultural production can now be saved in the data output folder:

save(df, file = "../data/output/df_lp_quarter.rda")
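
It can then be reloaded in the chapters that estimate the local projections:

load("../data/output/df_lp_quarter.rda")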