# mtt_haum/code/01_preprocessing.R
# 01_preprocessing.R
#
# content: (1) Parse raw log files
#          (2) Create event logs
#          (3) Add meta data
#
# input:  raw log files from ../data/haum/*.log
#         ../data/metadata/feiertage.csv
#         ../data/metadata/schulferien_2016-2018_NI.csv
#         ../data/metadata/schulferien_2019-2025_NI.csv
# output: results/raw_logfiles_<timestamp>.csv
#         results/event_logfiles_<timestamp>.csv
#
# last mod: 2024-02-23, NW

# setwd("C:/Users/nwickelmaier/Nextcloud/Documents/MDS/2023ss/60100_master_thesis/analysis/code")

# Load the in-development mtt package (provides parse_logfiles(),
# create_eventlogs(), ...) from a local checkout instead of library(mtt)
#library(mtt)
devtools::load_all("../../../../../software/mtt")

# Timestamp used to tag all result files exported by this run
now <- format(Sys.time(), "%Y-%m-%d_%H-%M-%S")

#--------------- (1) Parse raw log files ---------------

path <- "../data/haum/LogFiles/"
folders <- dir(path)
#folders <- "2016"    # uncomment to process a single year only

datraw <- parse_logfiles(folders, path)
# 91 corrupt lines have been found and removed from the data set

# Re-read a previous export instead of re-parsing (parsing is slow):
# datraw <- read.table("results/raw_logfiles_2023-10-25_16-20-45.csv",
#                      sep = ";", header = TRUE)

## Export data
write.table(datraw, paste0("results/raw_logfiles_", now, ".csv"),
            sep = ";", row.names = FALSE)
#--------------- (2) Create event logs ---------------

# Collapse raw moves into events; glossar matching is skipped and the
# event log is additionally saved by create_eventlogs() itself
datlogs <- create_eventlogs(datraw,
                            #xmlpath = "../data/haum/ContentEyevisit/eyevisit_cards_light/",
                            glossar = FALSE, save = TRUE)
# 2,136,694 no change moves removed

# OLD:
# 6,064 glossar entries, that could not be matched, have been removed
# 2,136,715 no change move events have been removed

# items <- unique(datlogs$item)
# topics <- extract_topics(items, xmlfiles = paste0(items, ".xml"),
#                          xmlpath = "../data/haum/ContentEyevisit/eyevisit_cards_light/")

# Indices for topics:
# 0 artist
# 1 thema
# 2 komposition
# 3 leben des kunstwerks
# 4 details
# 5 licht und farbe
# 6 extra info
# 7 technik
# ATTENTION: Need to know which topic maps onto which index!
datlogs$topic <- factor(datlogs$topic, levels = 0:7,
                        labels = c("artist", "thema", "komposition",
                                   "leben des kunstwerks", "details",
                                   "licht und farbe", "extra info",
                                   "technik"))
#--------------- (3) Add meta data ---------------

## Read data for holidays
hd0 <- read.table("../data/metadata/feiertage.csv", sep = ";", header = TRUE)
hd0$X.br. <- NULL                       # drop HTML artifact column

# Keep holidays for Lower Saxony ("NI") only
hd <- hd0[hd0$Abkuerzung == "NI", ]
names(hd) <- c("state", "stateCode", "date", "holiday")
# Use Date (not POSIXct) so the key class matches datlogs$date below;
# merging a POSIXct key against a Date key relies on implicit coercion
# and can silently shift days across time zones
hd$date <- as.Date(hd$date)
hd$state <- NULL
hd$stateCode <- NULL

## Read data for school vacations
# https://ferien-api.de/#holidaysPerStateAndYear
# Data extracted (on Linux) via:
# curl https://ferien-api.de/api/v1/holidays/NI > schulferien_NI.json
# library(jsonlite)
#
# dat <- read_json("data/metadata/schulferien_NI.json", simplify = TRUE)
# dat$slug <- NULL
#
# dat$name <- paste0(gsub("^(.*).niedersachsen.*", "\\1", dat$name),
#                    gsub("^.*niedersachsen [0-9]{4}(.*)", "\\1",
#                         dat$name))
#
# write.table(dat, "data/metadata/schulferien_2019-2025_NI.csv", sep = ";",
#             row.names = FALSE, quote = FALSE)

sf1 <- read.table("../data/metadata/schulferien_2016-2018_NI.csv", sep = ";",
                  header = TRUE)
sf2 <- read.table("../data/metadata/schulferien_2019-2025_NI.csv", sep = ";",
                  header = TRUE)
sf <- rbind(sf1, sf2)
sf$start <- as.Date(sf$start)
sf$end <- as.Date(sf$end)

# Expand each vacation period into one row per day; build a list of data
# frames and bind once instead of growing sfdat with rbind() in a loop
sfdat <- do.call(rbind, lapply(seq_len(nrow(sf)), function(i) {
  data.frame(date = seq(sf$start[i], sf$end[i], by = "day"),
             vacation = sf$name[i])
}))

## Merge data
# Daily key for joining holidays and vacations onto the event log
datlogs$date <- as.Date(datlogs$date.start)

dat1 <- merge(datlogs, hd, by = "date", all.x = TRUE)
dat2 <- merge(dat1, sfdat, by = "date", all.x = TRUE)

dat2$date <- NULL                       # helper key no longer needed

# merge() reorders rows; restore chronological order within log files
dat2 <- dat2[order(dat2$fileId.start, dat2$date.start, dat2$timeMs.start), ]

## Export data
write.table(dat2, paste0("results/event_logfiles_", now, ".csv"),
            sep = ";", row.names = FALSE)