# 09_user_navigation.R
#
# content: (1) Read data
#          (1.1) Read log event data
#          (1.2) Extract additional infos for clustering
#          (2) Clustering
#          (3) Fit tree
#          (4) Investigate variants
#
# input:  results/haum/event_logfiles_2024-02-21_16-07-33.csv
# output: results/haum/event_logfiles_pre-corona_with-clusters_cases.csv
#         results/haum/dattree.csv
#
# last mod: 2024-02-27

setwd("C:/Users/nwickelmaier/Nextcloud/Documents/MDS/2023ss/60100_master_thesis/analysis/code")

library(bupaverse)
library(factoextra)

#--------------- (1) Read data ---------------

#--------------- (1.1) Read log event data ---------------

dat0 <- read.table("results/haum/event_logfiles_2024-02-21_16-07-33.csv",
                   colClasses = c("character", "character", "POSIXct",
                                  "POSIXct", "character", "integer",
                                  "numeric", "character", "character",
                                  rep("numeric", 3), "character", "character",
                                  rep("numeric", 11), "character",
                                  "character"),
                   sep = ";", header = TRUE)

dat0$event <- factor(dat0$event, levels = c("move", "flipCard", "openTopic",
                                            "openPopup"))
dat0$topic <- factor(dat0$topic)
dat0$weekdays <- factor(weekdays(dat0$date.start),
                        levels = c("Montag", "Dienstag", "Mittwoch",
                                   "Donnerstag", "Freitag", "Samstag",
                                   "Sonntag"),
                        labels = c("Monday", "Tuesday", "Wednesday",
                                   "Thursday", "Friday", "Saturday",
                                   "Sunday"))

# Select pre-Corona data
dat <- dat0[as.Date(dat0$date.start) < "2020-03-13", ]
dat <- dat[dat$path != 106098, ]

rm(dat0)

#--------------- (1.2) Extract additional infos for clustering ---------------

datcase <- aggregate(cbind(distance, scaleSize, rotationDegree) ~ case, dat,
                     function(x) mean(x, na.rm = TRUE), na.action = NULL)
datcase$length <- aggregate(item ~ case, dat, length)$item

# Event counts per case (compute the contingency table only once)
evtab <- aggregate(event ~ case, dat, table)
eventtab <- data.frame(case       = evtab$case,
                       nmove      = evtab$event[, "move"],
                       nflipCard  = evtab$event[, "flipCard"],
                       nopenTopic = evtab$event[, "openTopic"],
                       nopenPopup = evtab$event[, "openPopup"])

# Topic counts per case (columns follow the order of the topic factor levels)
ttab <- aggregate(topic ~ case, dat, table)
topictab <- data.frame(case                 = ttab$case,
                       artist               = ttab$topic[, 1],
                       details              = ttab$topic[, 2],
                       extra_info           = ttab$topic[, 3],
                       komposition          = ttab$topic[, 4],
                       leben_des_kunstwerks = ttab$topic[, 5],
                       licht_und_farbe      = ttab$topic[, 6],
                       technik              = ttab$topic[, 7],
                       thema                = ttab$topic[, 8])

datcase <- datcase |>
  merge(eventtab, by = "case", all = TRUE) |>
  merge(topictab, by = "case", all = TRUE)

rm(evtab, ttab, eventtab, topictab)

datcase$ntopiccards <- aggregate(topic ~ case, dat,
                                 function(x) ifelse(all(is.na(x)), NA,
                                                    length(na.omit(x))),
                                 na.action = NULL)$topic
datcase$ntopics <- aggregate(topic ~ case, dat,
                             function(x) ifelse(all(is.na(x)), NA,
                                                length(unique(na.omit(x)))),
                             na.action = NULL)$topic
datcase$nitems <- aggregate(item ~ case, dat,
                            function(x) length(unique(x)),
                            na.action = NULL)$item
datcase$npaths <- aggregate(path ~ case, dat,
                            function(x) length(unique(x)),
                            na.action = NULL)$path
datcase$vacation <- aggregate(vacation ~ case, dat,
                              function(x) ifelse(all(is.na(x)), 0, 1),
                              na.action = NULL)$vacation
datcase$holiday <- aggregate(holiday ~ case, dat,
                             function(x) ifelse(all(is.na(x)), 0, 1),
                             na.action = NULL)$holiday
datcase$weekend <- aggregate(weekdays ~ case, dat,
                             function(x) ifelse(any(x %in% c("Saturday",
                                                             "Sunday")),
                                                1, 0),
                             na.action = NULL)$weekdays
# 1 if the case started before 14:00
datcase$morning <- aggregate(date.start ~ case, dat,
                             function(x) ifelse(lubridate::hour(x[1]) > 13,
                                                0, 1),
                             na.action = NULL)$date.start
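## Sketch only: the per-case event counts above could equivalently be
## computed in one pass (assumes the tidyr package is installed; dplyr is
## already used elsewhere in this script); printed for comparison, the
## result is not used further
dat |>
  dplyr::count(case, event) |>
  tidyr::pivot_wider(names_from = event, values_from = n,
                     names_prefix = "n", values_fill = 0) |>
  head()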
dat_split <- split(dat, ~ case)

# Minimum and maximum time stamps per case
time_minmax <- function(subdata) {
  subdata$min_time <- min(subdata$timeMs.start)
  if (all(is.na(subdata$timeMs.stop))) {
    subdata$max_time <- NA
  } else {
    subdata$max_time <- max(subdata$timeMs.stop, na.rm = TRUE)
  }
  subdata
}
# TODO: Export from package mtt

dat_list <- pbapply::pblapply(dat_split, time_minmax)
dat_minmax <- dplyr::bind_rows(dat_list)

datcase$min_time <- aggregate(min_time ~ case, dat_minmax, unique)$min_time
datcase$max_time <- aggregate(max_time ~ case, dat_minmax, unique)$max_time
datcase$duration <- datcase$max_time - datcase$min_time
datcase$min_time <- NULL
datcase$max_time <- NULL

# 1 if a case interacted with info cards only, 0 otherwise
check_infocards <- function(subdata, artworks) {
  as.numeric(!any(unique(subdata$item) %in% artworks))
}
# TODO: Move to helper file

artworks <- unique(dat$item)[!unique(dat$item) %in% c("501", "502", "503")]

datcase$infocardOnly <- pbapply::pbsapply(dat_split, check_infocards,
                                          artworks = artworks)

# Clean up NAs: scaleSize is neutral at 1, everything else at 0
datcase$scaleSize <- ifelse(is.na(datcase$scaleSize), 1, datcase$scaleSize)
zero_vars <- c("distance", "rotationDegree", "artist", "details",
               "extra_info", "komposition", "leben_des_kunstwerks",
               "licht_und_farbe", "technik", "thema", "ntopics",
               "ntopiccards")
for (var in zero_vars) {
  datcase[[var]] <- ifelse(is.na(datcase[[var]]), 0, datcase[[var]])
}

cor_mat <- cor(datcase[, -1], use = "pairwise")
diag(cor_mat) <- NA
heatmap(cor_mat)

# Min-max normalization to [0, 1], e.g. normalize(c(2, 4, 6)) is 0 0.5 1
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
# TODO: Move to helper file

# Features for navigation types for MTT:
# - Scanning / Overviewing:
#   * Proportion of artworks looked at is high
#   * Duration per artwork is low: "ave_duration_item" / datcase$duration
# - Exploring:
#   * Looking at additional information is high
# - Searching / Studying:
#   * Proportion of artworks looked at is low
#   * Opening few cards:
#     datcase$nflipCard / mean(datcase$nflipCard) or
#     median(datcase$nflipCard) is low
#   * but for most cards popups are opened:
#     datcase$nopenPopup / datcase$nflipCard is high
# - Wandering / Flitting:
#   * Proportion of moves is high
#   * Duration per case is low:
#     datcase$duration / mean(datcase$duration) or median(datcase$duration)
#   * Duration per artwork is low: "ave_duration_item" / datcase$duration
# A rough operationalization of these criteria is sketched below, after the
# feature data frame is built.

dattree <- data.frame(case          = datcase$case,
                      NumItems      = datcase$nitems,
                      NumTopic      = datcase$nopenTopic,
                      NumPopup      = datcase$nopenPopup,
                      PropMoves     = datcase$nmove / datcase$length,
                      PathLinearity = datcase$nitems / datcase$npaths,
                      Singularity   = datcase$npaths / datcase$length)

dattree$NumTopic <- ifelse(is.na(dattree$NumTopic), 0, dattree$NumTopic)
dattree$NumPopup <- ifelse(is.na(dattree$NumPopup), 0, dattree$NumPopup)
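## Sketch only (my rough operationalization, not part of the original
## feature set): score two of the navigation types from the features
## available so far; Scanning and Searching also need the duration and
## flip-card features that are only computed further below
nav_scores <- data.frame(
  case      = dattree$case,
  Exploring = normalize(dattree$NumTopic + dattree$NumPopup),
  Wandering = normalize(dattree$PropMoves)
)
summary(nav_scores[, -1])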
# Degree and betweenness centralization of the process map per case
get_centrality <- function(case, data) {
  data$start    <- data$date.start
  data$complete <- data$date.stop
  alog <- activitylog(data[data$case == case, ],
                      case_id = "case",
                      activity_id = "item",
                      resource_id = "path",
                      timestamps = c("start", "complete"))
  net  <- process_map(alog, render = FALSE)
  inet <- DiagrammeR::to_igraph(net)
  c(igraph::centr_degree(inet, loops = FALSE)$centralization,
    igraph::centr_degree(inet, loops = TRUE)$centralization,
    igraph::centr_betw(inet)$centralization)
}
# TODO: Move to helper file

# centrality <- lapply(dattree$case, get_centrality, data = dat)
# centrality <- do.call(rbind, centrality)
#
# save(centrality, file = "results/haum/tmp_centrality.RData")

load("results/haum/tmp_centrality.RData")

#dattree$DegreeCentrality <- centrality[, 2]
dattree$BetweenCentrality <- centrality[, 3]

## Add average duration per item

dat_split <- split(dat[, c("item", "case", "path", "timeMs.start",
                           "timeMs.stop")], ~ path)

dat_list <- pbapply::pblapply(dat_split, time_minmax)
dat_minmax <- dplyr::bind_rows(dat_list)

tmp <- aggregate(min_time ~ path, dat_minmax, unique)
tmp$max_time <- aggregate(max_time ~ path, dat_minmax, unique,
                          na.action = NULL)$max_time
tmp$duration <- tmp$max_time - tmp$min_time
tmp$case <- aggregate(case ~ path, dat_minmax, unique)$case

dattree$AvDurItem <- aggregate(duration ~ case, tmp, mean)$duration
#dattree$AvDurItem <- dattree$AvDurItem / datcase$duration

rm(tmp)

summary(dattree)

plot(dattree[, -1], pch = ".")

par(mfrow = c(2, 4))
hist(dattree$AvDurItem, breaks = 50, main = "")
hist(dattree$NumItems, breaks = 50, main = "")
hist(dattree$NumTopic, breaks = 50, main = "")
hist(dattree$NumPopup, breaks = 50, main = "")
hist(dattree$PropMoves, breaks = 50, main = "")
hist(dattree$PathLinearity, breaks = 50, main = "")
hist(dattree$Singularity, breaks = 50, main = "")
hist(dattree$BetweenCentrality, breaks = 50, main = "")

# Indicator variable if table was used as info terminal only
dattree$InfocardOnly <- factor(datcase$infocardOnly, levels = 0:1,
                               labels = c("no", "yes"))

# Add pattern to dattree; loosely based on Bousbia et al. (2009)
dattree$Pattern <- "Dispersion"
dattree$Pattern <- ifelse(dattree$PathLinearity > 0.8 &
                          dattree$Singularity > 0.8, "Scholar",
                          dattree$Pattern)
dattree$Pattern <- ifelse(dattree$PathLinearity <= 0.8 &
                          dattree$BetweenCentrality > 0.5, "Star",
                          dattree$Pattern)
dattree$Pattern <- factor(dattree$Pattern)

# Remove cases with extreme outliers
# TODO: Do I want this???
quantile(datcase$nopenTopic, 0.999)
quantile(datcase$nopenPopup, 0.999)

dattree <- dattree[!(dattree$NumTopic > 40 | dattree$NumPopup > 40), ]

plot(dattree[, -1], pch = ".")

par(mfrow = c(2, 4))
hist(dattree$AvDurItem, breaks = 50, main = "")
hist(dattree$NumItems, breaks = 50, main = "")
hist(dattree$NumTopic, breaks = 50, main = "")
hist(dattree$NumPopup, breaks = 50, main = "")
hist(dattree$PropMoves, breaks = 50, main = "")
hist(dattree$PathLinearity, breaks = 50, main = "")
hist(dattree$Singularity, breaks = 50, main = "")
hist(dattree$BetweenCentrality, breaks = 50, main = "")

#--------------- (2) Clustering ---------------

library(cluster)

df <- dattree[1:10000, -1]   # remove case variable
# TODO: Do I need to scale or does normalization also work?
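## Note on the TODO above: daisy(..., metric = "gower") rescales numeric
## variables by their range internally, so explicit min-max normalization
## should not change the resulting distances; quick check on a subsample
## (sketch)
rescale01 <- function(x) (x - min(x, na.rm = TRUE)) /
                         (max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
df100  <- df[1:100, ]
df100n <- as.data.frame(lapply(df100,
                        function(x) if (is.numeric(x)) rescale01(x) else x))
all.equal(c(daisy(df100, metric = "gower")),
          c(daisy(df100n, metric = "gower")))
# --> TRUE: normalizing beforehand is redundant for the Gower distances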
# Normalize Duration and Numbers
# df$AvDurItem <- normalize(df$AvDurItem)
# df$NumItems  <- normalize(df$NumItems)
# df$NumTopic  <- normalize(df$NumTopic)
# df$NumPopup  <- normalize(df$NumPopup)
# summary(df)

# Look at collinearity; the factor columns (InfocardOnly, Pattern) break
# cor(), dbscan(), and fviz_cluster(), so keep a numeric-only copy
df_num <- df[, sapply(df, is.numeric)]

cor_mat <- cor(df_num)
diag(cor_mat) <- NA
heatmap(cor_mat)

#df <- as.data.frame(scale(dattree[, -1]))

#--------------- (2.1) Hierarchical clustering ---------------

mat <- daisy(df, metric = "gower")

# "Flatten" with MDS
coor_2d <- as.data.frame(cmdscale(mat, k = 2))
coor_3d <- as.data.frame(cmdscale(mat, k = 3))

plot(coor_2d)
rgl::plot3d(coor_3d)

#mat <- dist(df)

# https://uc-r.github.io/hc_clustering

method <- c(average = "average", single = "single", complete = "complete",
            ward = "ward.D2")

hc_method <- function(x) {
  hclust(mat, method = x)
}

hcs <- lapply(method, hc_method)
cds <- lapply(hcs, cophenetic)
cors <- sapply(cds, cor, y = mat)
# Pick the linkage whose cophenetic distances correlate best with the
# original Gower distances; here this is average linkage
# https://en.wikipedia.org/wiki/Cophenetic_correlation
# https://stats.stackexchange.com/questions/195446/choosing-the-right-linkage-method-for-hierarchical-clustering

hc <- hcs$average

# Something like a scree plot: fusion heights of the first 100 merges
plot(rev(hc$height)[1:100], type = "b", pch = 16, cex = .5)
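## Sketch (not in the original script): complement the fusion-height plot
## with average silhouette widths on the Gower distances to pick k
sil <- sapply(2:8, function(k)
  mean(cluster::silhouette(cutree(hc, k = k), mat)[, "sil_width"]))
plot(2:8, sil, type = "b", pch = 16,
     xlab = "Number of clusters k", ylab = "Average silhouette width")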
k <- 4

mycols <- c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")

grp_hclust <- cutree(hc, k = k)

table(grp_hclust)

fviz_cluster(list(data = df_num, cluster = grp_hclust),
             palette = mycols,
             ellipse.type = "convex",
             show.clust.cent = FALSE,
             ggtheme = theme_bw())

plot(coor_2d, col = mycols[grp_hclust])
legend("topleft", paste("Cl", 1:4), col = mycols, pch = 21)
rgl::plot3d(coor_3d, col = mycols[grp_hclust])

# Pattern is a column of dattree/df, not of datcase
table(df$Pattern[grp_hclust == 1])
table(df$Pattern[grp_hclust == 2])
table(df$Pattern[grp_hclust == 3])
table(df$Pattern[grp_hclust == 4])

aggregate(. ~ grp_hclust, df_num, mean)

# Align datcase with the clustered rows (dattree was filtered for outliers
# above, and only the first 10,000 cases entered the clustering)
datcase_clust <- datcase[match(dattree$case[1:10000], datcase$case), ]

aggregate(cbind(duration, distance, scaleSize, rotationDegree, length,
                nmove, nflipCard, nopenTopic, nopenPopup) ~ grp_hclust,
          datcase_clust, mean)

#--------------- (2.2) DBSCAN clustering ---------------

library(dbscan)

d1 <- dbscan(df_num, eps = 1, minPts = ncol(df_num) + 1)
hullplot(df_num, d1)

grp_db <- d1$cluster
table(grp_db)

kNNdistplot(df_num, k = ncol(df_num))
abline(h = 0.2, col = "red")
abline(h = 1, col = "red")

fviz_cluster(list(data = df_num[grp_db != 0, ],
                  cluster = grp_db[grp_db != 0]),
             palette = mycols,
             ellipse.type = "convex",
             show.clust.cent = FALSE,
             ggtheme = theme_bw())

mycols <- c("black", mycols)

plot(coor_2d, col = mycols[grp_db + 1])
legend("topleft", paste("Cl", 0:4), col = mycols, pch = 21)
rgl::plot3d(coor_3d, col = mycols[grp_db + 1])

aggregate(. ~ grp_db, df_num, mean)

table(df$Pattern[grp_db == 0])
table(df$Pattern[grp_db == 1])
table(df$Pattern[grp_db == 2])
table(df$Pattern[grp_db == 3])
table(df$Pattern[grp_db == 4])

### Look at selected cases ###########################################

dattree[1:10000, ][grp_db == 0, ]

tmp <- dat
tmp$start <- tmp$date.start
tmp$complete <- tmp$date.stop

alog <- activitylog(tmp[tmp$case == 15, ],
                    case_id = "case",
                    activity_id = "item",
                    resource_id = "path",
                    timestamps = c("start", "complete"))

process_map(alog)

rm(tmp)

######################################################################

# grp_hclust and grp_db only exist for the clustered subset of cases
res <- merge(dat,
             data.frame(case = dattree$case[1:10000], grp_hclust, grp_db),
             by = "case", all.x = TRUE)
res <- res[order(res$fileId.start, res$date.start, res$timeMs.start), ]

xtabs( ~ item + grp_db, res)
aggregate(event ~ grp_db, res, table)

# Look at clusters
par(mfrow = c(2, 2))
vioplot::vioplot(duration ~ grp_db, res)
vioplot::vioplot(distance ~ grp_db, res)
vioplot::vioplot(scaleSize ~ grp_db, res)
vioplot::vioplot(rotationDegree ~ grp_db, res)

aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp_db,
          res, mean)
aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp_db,
          res, median)

write.table(res,
            file = "results/haum/event_logfiles_pre-corona_with-clusters_cases.csv",
            sep = ";", quote = FALSE, row.names = FALSE)

save(res, mat, hcs, grp_hclust, d1, grp_db, datcase, dattree, df,
     file = "results/haum/tmp_user-navigation.RData")

#--------------- (3) Fit tree ---------------

library(rpart)
library(partykit)

dattree_db <- df[grp_db != 0, ]
dattree_db$grp <- factor(grp_db[grp_db != 0])

c1 <- rpart(grp ~ ., data = dattree_db, method = "class")
plot(as.party(c1))

c2 <- rpart(as.factor(grp_hclust) ~ ., data = df, method = "class")
plot(as.party(c2))

# with conditional tree
c3 <- ctree(grp ~ ., data = dattree_db, alpha = 0.05)
plot(c3)

# with excluded points
c5 <- ctree(factor(grp_db) ~ ., data = dattree[1:10000, -1], alpha = 0)
plot(c5)

# with excluded points
c6 <- ctree(factor(grp_db) ~ ., data = df, alpha = 0)
plot(c6)
# --> just checking

#--------------- (4) Investigate variants ---------------

res$start <- res$date.start
res$complete <- res$date.stop

alog <- activitylog(res,
                    case_id = "case",
                    activity_id = "item",
                    resource_id = "path",
                    timestamps = c("start", "complete"))

trace_explorer(alog, n_traces = 25)
# --> sequences of artworks are just too rare

tr <- traces(alog)

trace_length <- sapply(strsplit(tr$trace, ","), length)
tr[trace_length > 10, ]

trace_varied <- sapply(strsplit(tr$trace, ","),
                       function(x) length(unique(x)))
tr[trace_varied > 1, ]

table(tr[trace_varied > 2, "absolute_frequency"])
table(tr[trace_varied > 3, "absolute_frequency"])

summary(tr$absolute_frequency)
vioplot::vioplot(tr$absolute_frequency)

# Power law for frequencies of traces
tab <- table(tr$absolute_frequency)
x <- as.numeric(tab)          # number of traces with a given frequency
y <- as.numeric(names(tab))   # trace frequency

plot(x, y, log = "xy")
p1 <- lm(log(y) ~ log(x))
pre <- exp(coef(p1)[1]) * x^coef(p1)[2]
lines(x, pre)

# Look at individual traces as examples
tr[trace_varied == 5 & trace_length > 50, ]
# --> every variant exists only once, of course

datcase[datcase$nitems == 5 & datcase$length > 50, ]

sapply(datcase[, -c(1, 9)], median)

#ex <- datcase[datcase$nitems == 4 & datcase$length == 15, ]
ex <- datcase[datcase$nitems == 5, ]
ex <- ex[sample(1:nrow(ex), 20), ]
# --> pretty randomly chosen... TODO:
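## For a reproducible (if still arbitrary) selection of example cases, a
## seed could be fixed before sampling (sketch; the seed value is arbitrary):
# set.seed(1607)
# ex <- ex[sample(1:nrow(ex), 20), ]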
# Select example cases that touched item "080" or info card "503"
case_ids <- sapply(ex$case, function(case)
  any(c("080", "503") %in% res$item[res$case == case]))

cases <- ex$case[case_ids]

for (case in cases) {
  alog <- activitylog(res[res$case == case, ],
                      case_id = "case",
                      activity_id = "item",
                      resource_id = "path",
                      timestamps = c("start", "complete"))
  dfg <- process_map(alog,
                     type_nodes = frequency("absolute",
                                            color_scale = "Greys"),
                     type_edges = frequency("absolute",
                                            color_edges = "#FF6900"),
                     rankdir = "LR",
                     render = FALSE)
  export_map(dfg,
             file_name = paste0("results/processmaps/dfg_example_cases_",
                                case, "_R.pdf"),
             file_type = "pdf",
             title = paste("Case", case))
}

########################### TODO: Still need it?

net <- process_map(alog, render = FALSE)
#DiagrammeR::get_node_df(net)
DiagrammeR::get_node_info(net)

DiagrammeR::get_degree_distribution(net)
DiagrammeR::get_degree_in(net)
DiagrammeR::get_degree_out(net)
DiagrammeR::get_degree_total(net)

N <- DiagrammeR::count_nodes(net) - 2   # do not count start and stop nodes

dc <- DiagrammeR::get_degree_total(net)[1:N, "total_degree"] / (N - 1)

inet <- DiagrammeR::to_igraph(net)

igraph::centr_degree(inet, loops = FALSE)
igraph::centr_betw(inet)
igraph::centr_clo(inet)
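## Toy sanity check for the centralization measures used above (sketch): a
## star graph is maximally centralized, a ring graph not at all
igraph::centr_degree(igraph::make_star(5, mode = "undirected"))$centralization  # 1
igraph::centr_degree(igraph::make_ring(5))$centralization                       # 0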