Chapter 10 Grid Search Analysis
If you are interested in estimating an animal's locations and the extent of its movement, use the following grid search analysis.
As in Chapter 8 - Habitat Use, the grid search analysis divides an area into a grid and calculates a value at each grid point from the signal strengths received at the nodes; the grid point with the highest value becomes the position estimate. This process is then repeated for each time step in a series of detections recorded by the node network.
10.1 Loading Settings
10.1.3 Load settings
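The code in this chapter assumes the packages used throughout this book are already attached. If you are starting from a fresh R session, load them first; this is a minimal sketch, and the exact list may differ from your setup.
# Packages used in this chapter (adjust to your installation)
library(celltracktech) # node, grid, and track functions
library(DBI)           # database connections
library(duckdb)        # DuckDB driver
library(dplyr)         # tbl(), filter(), mutate(), collect()
library(readr)         # read_csv() for Sidekick tracks
library(ggplot2)       # uncertainty plots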
# Specify the path to your database file
database_file <- "./data/Meadows V2/meadows.duckdb"
# (OPTIONAL) Specify Node time offsets if necessary
node_time_offset_file <- "./data/Meadows V2/node_time_offset_20230802.csv"
node_toff_df <- read.csv(node_time_offset_file)
# Specify the tag ID that you want to locate
my_tag_id <- "072A6633"
# Specify the RSSI vs Distance fit coefficients from calibration
a <- -103.0610446987
b <- -60.6023833206
c <- 0.0120558164
rssi_coefs <- c(a, b, c)
# Specify the time range of node data you want to import for this analysis
# This range should cover a large time window where your nodes were in
# a constant location. All node health records in this time window
# will be used to accurately determine the position of your nodes
# IMPORTANT! If you have included a node time offset file,
# make sure it was calculated using the same start and stop times as below.
node_start_time <- as.POSIXct("2023-08-01 00:00:00", tz = "GMT")
node_stop_time <- as.POSIXct("2023-08-07 00:00:00", tz = "GMT")
# Specify time range of detection data you want to pull from the DB
det_start_time <- as.POSIXct("2023-08-03 00:00:00", tz = "GMT")
det_stop_time <- as.POSIXct("2023-08-04 00:00:00", tz = "GMT")
# Specify a list of node IDs if you only want to include a subset of nodes in this analysis
# If you want to use all nodes, ignore this line and skip the step below
# where the data frame is trimmed to only the nodes in this list
# my_nodes <- c("B25AC19E", "44F8E426", "FAB6E12", "1EE02113", "565AA5B9", "EE799439", "1E762CF3", "A837A3F4", "484ED33B")
# You can specify an alternative map tile URL to use here
my_tile_url <- "https://mt2.google.com/vt/lyrs=y&x={x}&y={y}&z={z}"
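10.2 Load Node Health Data
The node-location step in the next section needs a node_health_df containing node health records collected between node_start_time and node_stop_time. The query below is a minimal sketch of that step; it assumes the database stores node health records in a table named node_health with a time column, mirroring the detection query in Section 10.4.
# Load node health records for the node location window (table name assumed)
con <- DBI::dbConnect(duckdb::duckdb(),
                      dbdir = database_file,
                      read_only = TRUE)
node_health_df <- tbl(con, "node_health") |>
  filter(time >= node_start_time & time <= node_stop_time) |>
  collect()
DBI::dbDisconnect(con)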
10.3 Get Node Locations
# Calculate the average node locations
node_locs <- calculate_node_locations(node_health_df)
# Plot the average node locations
node_loc_plot <- plot_node_locations(node_health_df,
node_locs,
theme = classic_plot_theme())
node_loc_plot
# Write the node locations to a file
export_node_locations("./results/node_locations.csv", node_locs)
# Draw a map with the node locations
node_map <- map_node_locations(node_locs, tile_url = my_tile_url)
node_map
10.4 Load Station Detection Data
# Load from DB
con <- DBI::dbConnect(duckdb::duckdb(),
dbdir = database_file,
read_only = TRUE)
detection_df <- tbl(con, "raw") |>
filter(time >= det_start_time & time <= det_stop_time) |>
filter(tag_id == my_tag_id) |>
collect()
DBI::dbDisconnect(con)
detection_df <- detection_df %>% mutate(time_value = as.integer(time))
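Before building a grid, it can help to confirm that the tag was actually detected in this window. The quick summary below is a suggestion only and assumes the raw table includes a node_id column identifying the receiving node.
# Quick sanity check: number of detections of this tag per node (node_id column assumed)
detection_df %>%
  count(node_id, sort = TRUE)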
10.5 Build a Node Grid
grid_center_lat <- 38.93664800
grid_center_lon <- -74.9462
grid_size_x <- 500 # meters
grid_size_y <- 800 # meters
grid_bin_size <- 5 # meters
# Create a data frame with the details about the grid
grid_df <- build_grid(
node_locs = node_locs,
center_lat = grid_center_lat,
center_lon = grid_center_lon,
x_size_meters = grid_size_x,
y_size_meters = grid_size_y,
bin_size = grid_bin_size
)
# Draw all of the grid bins on a map
grid_map <- draw_grid(node_locs, grid_df)
grid_map
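With the settings above, the grid covers 500 m x 800 m at 5 m resolution, so each time step evaluates roughly (500 / 5) * (800 / 5) = 16,000 bins. Larger areas or finer bins increase run time accordingly; if you change the settings, the same arithmetic gives the new bin count.
# Approximate number of grid bins implied by the settings above
(grid_size_x / grid_bin_size) * (grid_size_y / grid_bin_size) # about 16,000 bins per time step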
10.6 (Optional) Calculate Test Solution
# Choose a single detection time at which to test the position solution
test_time <- as.POSIXct("2023-08-03 19:57:50", tz = "GMT")
test_rec_df <- calc_receiver_values(
current_time = test_time,
det_window = 60,
station_tag_df = detection_df,
node_locs = node_locs,
node_t_offset = node_toff_df,
rssi_coefs = rssi_coefs,
filter_alpha = 0.7,
filter_time_range = 120
)
print(test_rec_df)
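The filter_alpha and filter_time_range arguments control how the raw RSSI values inside the detection window are smoothed into the filtered_rssi values used below. As a conceptual illustration only, and not the celltracktech implementation, an exponential filter with alpha = 0.7 would weight each new reading against the running value like this:
# Conceptual sketch of exponential RSSI smoothing (illustration only;
# the actual filtering happens inside calc_receiver_values)
smooth_rssi <- function(rssi, alpha = 0.7) {
  out <- numeric(length(rssi))
  out[1] <- rssi[1]
  for (i in seq_along(rssi)[-1]) {
    out[i] <- alpha * rssi[i] + (1 - alpha) * out[i - 1]
  }
  out
}
smooth_rssi(c(-90, -85, -95, -80))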
# Find the grid search solution: the grid bin with the maximum value
test_grid_values <- calc_grid_values(grid_df, test_rec_df, rssi_coefs)
solution <- subset(test_grid_values, value == max(value))
print(solution)
# Multilateration calculation: fit the position (ml_lat, ml_lon) whose
# haversine distances to the receiving nodes best match the distances
# expected from their filtered RSSI values
reduced_rec_df <- subset(test_rec_df, filtered_rssi >= a) # drop receivers at or below the RSSI floor (coefficient a)
node_w_max <- reduced_rec_df[which.max(reduced_rec_df$filtered_rssi), ] # strongest receiver supplies the starting guess
multilat_fit <- nls(exp_dist ~ haversine(lat, lon, ml_lat, ml_lon),
                    data = reduced_rec_df,
                    start = list(ml_lat = node_w_max$lat, ml_lon = node_w_max$lon),
                    control = nls.control(warnOnly = TRUE, minFactor = 1/65536, maxiter = 100))
print(multilat_fit)
# Extract the fitted latitude and longitude
co <- coef(summary(multilat_fit))
print(paste(co[1, 1], co[2, 1]))
multilat_result <- c(co[1, 1], co[2, 1])
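The haversine() call above returns the great-circle distance between two latitude/longitude points. If you want to check a fitted distance by hand, a generic haversine implementation in meters looks roughly like the sketch below; it is a reference implementation, not necessarily identical to the package's own function.
# Generic haversine great-circle distance in meters (reference sketch)
haversine_m <- function(lat1, lon1, lat2, lon2, r = 6371000) {
  to_rad <- pi / 180
  dlat <- (lat2 - lat1) * to_rad
  dlon <- (lon2 - lon1) * to_rad
  h <- sin(dlat / 2)^2 + cos(lat1 * to_rad) * cos(lat2 * to_rad) * sin(dlon / 2)^2
  2 * r * asin(sqrt(pmin(1, h)))
}
haversine_m(38.9366, -74.9462, 38.9400, -74.9400)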
test_map <- draw_single_solution(test_rec_df,
test_grid_values,
solution,
multilat_result,
my_tile_url)
test_map
10.7 Calculate Track
track_df <- calculate_track(
start_time = "2023-08-03 19:50:45",
length_seconds = 1050,
step_size_seconds = 10,
det_time_window = 60, # Must have detection within this window to be included in position calculation
filter_alpha = 0.7,
filter_time_range = 120, # Time range to include detections in filtered value
grid_df = grid_df,
detection_df = detection_df,
node_locs = node_locs,
node_t_offset = node_toff_df,
rssi_coefs = rssi_coefs,
track_frame_output_path = NULL # If NULL no individual frames will be saved
)
print(track_df)
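With the settings above, the track spans 1050 seconds in 10-second steps, so track_df should contain on the order of 1050 / 10 = 105 time steps.
nrow(track_df) # on the order of length_seconds / step_size_seconds = 105 rows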
track_map <- map_track(node_locs, track_df, my_tile_url)
track_map
10.8 (Optional) Compare with Known Track
# If you've recorded a test track with the Sidekick and want to see how well
# you are able to recreate it, you can use the commands below.
# Load the example test track from the celltracktech package; if you recorded
# your own test track, load it from its .csv file instead:
# sidekick_df <- read_csv('./path/to/csv_file')
sidekick_df <- celltracktech::sidekick_cal_test2
# Correct Sidekick time formatting by dropping everything after the seconds
sidekick_df <- sidekick_df %>% mutate(time_utc = substring(time_utc, 1, 19))
# Add a numerical time value column
sidekick_df <- sidekick_df %>% mutate(time_value = get_time_value(time_utc))
# Trim the Sidekick data to the time range of the calculated track
sidekick_df <- subset(sidekick_df, time_value <= max(track_df$time))
track_error_df <- calc_track_error(sidekick_df, track_df)
print(track_error_df)
print(min(track_error_df$error))
print(max(track_error_df$error))
print(paste("Grid Search Solution Error = ",
mean(track_error_df$error),
" +/- ",
sd(track_error_df$error)))
print(paste("Multilateration Solution Error = ",
mean(track_error_df$ml_error),
" +/- ",
sd(track_error_df$ml_error)))
compare_map <- map_track_error(node_locs,
track_error_df,
sidekick_df,
my_tile_url)
compare_map
10.9 Plot Uncertainty Analysis
# Plot the grid search and multilateration error for each track point
ggplot() +
geom_point(data = track_error_df,
aes(x = i,
y = ml_error,
color = 'Multilateration Error')) +
geom_point(data = track_error_df,
aes(x = i,
y = error,
color = 'Grid Search Error')) +
labs(color = 'Error Type') +
scale_color_manual(values=c('Grid Search Error' = 'black',
'Multilateration Error' = 'orange'))+
xlab("Track Point #") +
ylab("Solution Error (m)") +
classic_plot_theme()
# Plot the grid search and multilateration error against the maximum RSSI (dBm) at each track point
ggplot() +
geom_point(data = track_error_df,
aes(x = max_rssi,
y = ml_error,
color = 'Multilateration Error')) +
geom_point(data = track_error_df,
aes(x = max_rssi,
y = error,
color = 'Grid Search Error')) +
labs(color = 'Error Type') +
scale_color_manual(values=c('Grid Search Error' = 'black',
'Multilateration Error' = 'orange'))+
xlab("Max RSSI (dBm)") +
ylab("Position Error (m)") +
classic_plot_theme()
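If you want to keep either of these figures, ggplot2's ggsave() writes the most recently displayed plot to disk; the file path below is only an example.
# Save the last plot shown; adjust the path and dimensions as needed
ggsave("./results/track_error_vs_max_rssi.png", width = 8, height = 5, dpi = 300)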