Given the raw JSON returned by an Elasticsearch aggregations ("aggs") query, parse the aggregations into a data.table.
Examples
# A sample raw result from an aggs query combining date_histogram and extended_stats:
result <- '{"aggregations":{"dateTime":{"buckets":[{"key_as_string":"2016-12-01T00:00:00.000Z",
"key":1480550400000,"doc_count":123,"num_potatoes":{"count":120,"min":0,"max":40,"avg":15,
"sum":1800,"sum_of_squares":28000,"variance":225,"std_deviation":15,"std_deviation_bounds":{
"upper":26,"lower":13}}},{"key_as_string":"2017-01-01T00:00:00.000Z","key":1483228800000,
"doc_count":134,"num_potatoes":{"count":131,"min":0,"max":39,"avg":16,"sum":2096,
"sum_of_squares":34000,"variance":225,"std_deviation":15,"std_deviation_bounds":{"upper":26,
"lower":13}}}]}}}'
# Parse into a data.table
aggDT <- chomp_aggs(aggs_json = result)
print(aggDT)
#> dateTime num_potatoes.count num_potatoes.min
#> <char> <int> <int>
#> 1: 2016-12-01T00:00:00.000Z 120 0
#> 2: 2017-01-01T00:00:00.000Z 131 0
#> num_potatoes.max num_potatoes.avg num_potatoes.sum
#> <int> <int> <int>
#> 1: 40 15 1800
#> 2: 39 16 2096
#> num_potatoes.sum_of_squares num_potatoes.variance num_potatoes.std_deviation
#> <int> <int> <int>
#> 1: 28000 225 15
#> 2: 34000 225 15
#> num_potatoes.std_deviation_bounds.upper
#> <int>
#> 1: 26
#> 2: 26
#> num_potatoes.std_deviation_bounds.lower doc_count
#> <int> <int>
#> 1: 13 123
#> 2: 13 134