800 lines
26 KiB
R
800 lines
26 KiB
R
# 09_user_navigation.R
|
|
#
|
|
# content: (1) Read data
|
|
# (1.1) Read log event data
|
|
# (1.2) Extract additional infos for clustering
|
|
# (2) Clustering
|
|
# (3) Fit tree
|
|
# (4) Investigate variants
|
|
#
|
|
# input: results/haum/event_logfiles_2024-02-21_16-07-33.csv
|
|
# output: results/haum/event_logfiles_pre-corona_with-clusters_cases.csv
|
|
# results/haum/dattree.csv
|
|
#
|
|
# last mod: 2024-02-27
|
|
|
|
|
|
# setwd("C:/Users/nwickelmaier/Nextcloud/Documents/MDS/2023ss/60100_master_thesis/analysis/code")
|
|
|
|
library(bupaverse)
|
|
library(factoextra)
|
|
|
|
#--------------- (1) Read data ---------------
|
|
|
|
#--------------- (1.1) Read log event data ---------------
|
|
|
|
dat0 <- read.table("results/haum/event_logfiles_2024-02-21_16-07-33.csv",
|
|
colClasses = c("character", "character", "POSIXct",
|
|
"POSIXct", "character", "integer",
|
|
"numeric", "character", "character",
|
|
rep("numeric", 3), "character",
|
|
"character", rep("numeric", 11),
|
|
"character", "character"),
|
|
sep = ";", header = TRUE)
|
|
|
|
dat0$event <- factor(dat0$event, levels = c("move", "flipCard", "openTopic",
|
|
"openPopup"))
|
|
dat0$topic <- factor(dat0$topic)
|
|
|
|
dat0$weekdays <- factor(weekdays(dat0$date.start),
|
|
levels = c("Montag", "Dienstag", "Mittwoch",
|
|
"Donnerstag", "Freitag", "Samstag",
|
|
"Sonntag"),
|
|
labels = c("Monday", "Tuesday", "Wednesday",
|
|
"Thursday", "Friday", "Saturday",
|
|
"Sunday"))
|
|
|
|
|
|
# Select data pre Corona
|
|
dat <- dat0[as.Date(dat0$date.start) < "2020-03-13", ]
|
|
dat <- dat[dat$path != 106098, ]
|
|
|
|
rm(dat0)
|
|
|
|
#--------------- (1.2) Extract additional infos for clustering ---------------
|
|
|
|
datcase <- aggregate(cbind(distance, scaleSize, rotationDegree) ~
|
|
case, dat, function(x) mean(x, na.rm = TRUE), na.action = NULL)
|
|
|
|
# Number of events per case
datcase$length <- aggregate(item ~ case, dat, length)$item

# Event counts per case: run the (fairly expensive) aggregate ONCE and
# reuse the result, instead of recomputing it for every single column.
eventagg <- aggregate(event ~ case, dat, table)
eventtab <- eventagg["case"]
eventtab$nmove      <- eventagg$event[, "move"]
eventtab$nflipCard  <- eventagg$event[, "flipCard"]
eventtab$nopenTopic <- eventagg$event[, "openTopic"]
eventtab$nopenPopup <- eventagg$event[, "openPopup"]

# Topic counts per case, again aggregated once. Columns are picked by
# position, i.e., by the (alphabetical) level order of dat$topic.
# NOTE(review): positional indexing assumes exactly these 8 topic levels
# in this order -- confirm against levels(dat$topic).
topicagg <- aggregate(topic ~ case, dat, table)
topictab <- topicagg["case"]
topictab$artist               <- topicagg$topic[, 1]
topictab$details              <- topicagg$topic[, 2]
topictab$extra_info           <- topicagg$topic[, 3]
topictab$komposition          <- topicagg$topic[, 4]
topictab$leben_des_kunstwerks <- topicagg$topic[, 5]
topictab$licht_und_farbe      <- topicagg$topic[, 6]
topictab$technik              <- topicagg$topic[, 7]
topictab$thema                <- topicagg$topic[, 8]

# Attach event and topic counts to the per-case data
datcase <- datcase |>
  merge(eventtab, by = "case", all = TRUE) |>
  merge(topictab, by = "case", all = TRUE)
|
|
|
|
datcase$ntopiccards <- aggregate(topic ~ case, dat,
|
|
function(x) ifelse(all(is.na(x)), NA,
|
|
length(na.omit(x))), na.action =
|
|
NULL)$topic
|
|
datcase$ntopics <- aggregate(topic ~ case, dat,
|
|
function(x) ifelse(all(is.na(x)), NA,
|
|
length(unique(na.omit(x)))), na.action =
|
|
NULL)$topic
|
|
datcase$nitems <- aggregate(item ~ case, dat, function(x)
|
|
length(unique(x)), na.action = NULL)$item
|
|
datcase$npaths <- aggregate(path ~ case, dat, function(x)
|
|
length(unique(x)), na.action = NULL)$path
|
|
datcase$vacation <- aggregate(vacation ~ case, dat,
|
|
function(x) ifelse(all(is.na(x)), 0, 1),
|
|
na.action = NULL)$vacation
|
|
datcase$holiday <- aggregate(holiday ~ case, dat,
|
|
function(x) ifelse(all(is.na(x)), 0, 1),
|
|
na.action = NULL)$holiday
|
|
datcase$weekend <- aggregate(weekdays ~ case, dat,
|
|
function(x) ifelse(any(x %in% c("Saturday", "Sunday")), 1, 0),
|
|
na.action = NULL)$weekdays
|
|
datcase$morning <- aggregate(date.start ~ case, dat,
|
|
function(x) ifelse(lubridate::hour(x[1]) > 13, 0, 1),
|
|
na.action = NULL)$date.start
|
|
|
|
dat_split <- split(dat, ~ case)
|
|
|
|
time_minmax <- function(subdata) {
  # Attach the per-case time range (in ms) to every row of subdata:
  # min_time = earliest start, max_time = latest stop. Both columns are
  # constant within subdata so the result can be row-bound across cases
  # and later collapsed with aggregate(..., unique).
  #
  # Fix: the original guarded only timeMs.stop against all-NA input;
  # timeMs.start went through a bare min(), which propagates NA (and
  # would warn/-Inf on an empty vector). Both sides are now symmetric.
  if (all(is.na(subdata$timeMs.start))) {
    subdata$min_time <- NA
  } else {
    subdata$min_time <- min(subdata$timeMs.start, na.rm = TRUE)
  }
  if (all(is.na(subdata$timeMs.stop))) {
    subdata$max_time <- NA
  } else {
    subdata$max_time <- max(subdata$timeMs.stop, na.rm = TRUE)
  }
  subdata
}
|
|
# TODO: Export from package mtt
|
|
|
|
dat_list <- pbapply::pblapply(dat_split, time_minmax)
|
|
dat_minmax <- dplyr::bind_rows(dat_list)
|
|
|
|
datcase$min_time <- aggregate(min_time ~ case, dat_minmax, unique)$min_time
|
|
datcase$max_time <- aggregate(max_time ~ case, dat_minmax, unique)$max_time
|
|
|
|
datcase$duration <- datcase$max_time - datcase$min_time
|
|
datcase$min_time <- NULL
|
|
datcase$max_time <- NULL
|
|
|
|
|
|
check_infocards <- function(subdata, artworks) {
  # Return 1 if a case interacted ONLY with infocards (none of its items
  # is in `artworks`), and 0 as soon as at least one artwork was touched.
  touched_artwork <- any(unique(subdata$item) %in% artworks)
  as.numeric(!touched_artwork)
}
|
|
|
|
artworks <- unique(dat$item)[!unique(dat$item) %in% c("501", "502", "503")]
|
|
|
|
datcase$infocardOnly <- pbapply::pbsapply(dat_split, check_infocards, artworks = artworks)
|
|
|
|
|
|
# Clean up NAs: cases without the corresponding interaction get a neutral
# default -- 0 for distances/rotations/counts, 1 for scaleSize (no zoom).
# One loop replaces the 13 copy-pasted ifelse(is.na(...)) lines; direct
# subassignment is equivalent to ifelse() here for plain numeric columns.
na_defaults <- c(distance             = 0,
                 scaleSize            = 1,
                 rotationDegree       = 0,
                 artist               = 0,
                 details              = 0,
                 extra_info           = 0,
                 komposition          = 0,
                 leben_des_kunstwerks = 0,
                 licht_und_farbe      = 0,
                 technik              = 0,
                 thema                = 0,
                 ntopics              = 0,
                 ntopiccards          = 0)

for (col in names(na_defaults)) {
  datcase[[col]][is.na(datcase[[col]])] <- na_defaults[[col]]
}
|
|
|
|
|
|
|
|
cor_mat <- cor(datcase[, -1], use = "pairwise")
|
|
diag(cor_mat) <- NA
|
|
heatmap(cor_mat)
|
|
|
|
|
|
normalize <- function(x, na.rm = FALSE) {
  # Min-max scale a numeric vector to [0, 1].
  #
  # @param x     numeric vector
  # @param na.rm drop NAs when computing the range? Default FALSE keeps
  #              the original behavior (any NA makes the result all NA).
  # @return numeric vector the same length as x
  #
  # NOTE: a constant vector yields NaN (0/0), as in the original.
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1])
}
|
|
|
|
|
|
#df <- as.data.frame(lapply(datcase[, -1], normalize))
|
|
df <- as.data.frame(lapply(datcase[, -1], scale))
|
|
#df <- datcase[, -1]
|
|
|
|
# "Flatten" with PCA
|
|
pc <- prcomp(df)
|
|
coor_2d <- as.data.frame(pc$x[, c(1, 2)])
|
|
coor_3d <- as.data.frame(pc$x[, c(1, 2, 3)])
|
|
|
|
plot(coor_2d)
|
|
rgl::plot3d(coor_3d)
|
|
|
|
#--------------- (2.1) K-Means clustering ---------------
|
|
|
|
mycols <- c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")
|
|
|
|
k1 <- kmeans(df, 4)
|
|
|
|
grp_km <- k1$cluster
|
|
table(grp_km)
|
|
|
|
fviz_cluster(list(data = df, cluster = grp_km),
|
|
palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
plot(coor_2d, col = mycols[grp_km])
|
|
|
|
rgl::plot3d(coor_3d, col = mycols[grp_km])
|
|
|
|
aggregate(. ~ grp_km, df, mean)
|
|
|
|
#--------------- (2.2) Hierarchical clustering ---------------
|
|
|
|
mat <- dist(df)
|
|
|
|
h1 <- hclust(mat, method = "average")
|
|
h2 <- hclust(mat, method = "complete")
|
|
h3 <- hclust(mat, method = "ward.D")
|
|
h4 <- hclust(mat, method = "ward.D2")
|
|
h5 <- hclust(mat, method = "single")
|
|
|
|
# Cophenetic Distances, for each linkage (runs quite some time!)
|
|
c1 <- cophenetic(h1)
|
|
c2 <- cophenetic(h2)
|
|
c3 <- cophenetic(h3)
|
|
c4 <- cophenetic(h4)
|
|
c5 <- cophenetic(h5)
|
|
|
|
# Correlations
|
|
cor(mat, c1)
|
|
cor(mat, c2)
|
|
cor(mat, c3)
|
|
cor(mat, c4)
|
|
cor(mat, c5)
|
|
|
|
# https://en.wikipedia.org/wiki/Cophenetic_correlation
|
|
# https://stats.stackexchange.com/questions/195446/choosing-the-right-linkage-method-for-hierarchical-clustering
|
|
|
|
hc <- h4
|
|
|
|
# Something like a scree plot (??)
|
|
plot(rev(hc$height)[1:100], type = "b", pch = 16, cex = .5)
|
|
|
|
k <- 4
|
|
|
|
grp_hclust <- cutree(hc, k = k)
|
|
|
|
table(grp_hclust)
|
|
|
|
fviz_cluster(list(data = df, cluster = grp_hclust),
|
|
palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
plot(coor_2d, col = mycols[grp_hclust])
|
|
rgl::plot3d(coor_3d, col = mycols[grp_hclust])
|
|
|
|
table(dattree[grp_hclust == 1, "Pattern"])
|
|
table(dattree[grp_hclust == 2, "Pattern"])
|
|
table(dattree[grp_hclust == 3, "Pattern"])
|
|
table(dattree[grp_hclust == 4, "Pattern"])
|
|
|
|
|
|
aggregate(. ~ grp_hclust, df, mean)
|
|
|
|
|
|
aggregate(cbind(duration, distance, scaleSize, rotationDegree, length,
|
|
nmove, nflipCard, nopenTopic, nopenPopup) ~ grp_hclust, datcase,
|
|
mean)
|
|
|
|
#--------------- (2.3) DBSCAN clustering ---------------
|
|
|
|
library(dbscan)
|
|
d1 <- dbscan(df, eps = .15, minPts = ncol(df) + 1)
|
|
hullplot(df, d1)
|
|
|
|
grp_db <- d1$cluster
|
|
table(grp_db)
|
|
|
|
kNNdistplot(df, k = ncol(df))
|
|
abline(h = 0.2, col = "red")
|
|
abline(h = 0.06, col = "red")
|
|
|
|
fviz_cluster(list(data = df[grp_db != 0, ], cluster = grp_db[grp_db != 0]),
|
|
#palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
mycols <- c("black", mycols)
|
|
|
|
plot(coor_2d, col = mycols[grp_db + 1])
|
|
rgl::plot3d(coor_3d, col = mycols[grp_db + 1])
|
|
|
|
aggregate(. ~ grp_db, df, mean)
|
|
|
|
table(dattree[grp_db == 0, "Pattern"])
|
|
table(dattree[grp_db == 1, "Pattern"])
|
|
table(dattree[grp_db == 2, "Pattern"])
|
|
table(dattree[grp_db == 3, "Pattern"])
|
|
|
|
# Does not really work with these features!
|
|
|
|
|
|
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
|
|
|
# Navigation types by Bousbia et al. (2010):
|
|
# - Overviewing: this value is close to the Canter “scanning” value. It
|
|
# implies that the learner is covering a large proportion of course pages.
|
|
# Through this phase of fast-reading, the user seeks to acquire an
|
|
# overall view of the course.
|
|
# - Flitting: close to “wandering”. It reflects a browsing activity without a
|
|
# strategy or a particular goal. The main difference with the overviewing
|
|
# type is the lack of focus on the course.
|
|
# - Studying: corresponds to a partial or complete reading of the course
|
|
# pages where the learner spends time on each page.
|
|
# - Deepening: This describes a learner who spends relatively long time on a
|
|
# course, checking details, and seeking Web documents related to the course
|
|
# topics. The main difference with studying is the Web search part that the
|
|
# learner uses to obtain a deeper understanding of the course.
|
|
|
|
# Taxonomy defined by Canter et al. (1985):
|
|
# - Scanning: seeking an overview of a theme (i.e. subpart of the hypermedia)
|
|
# by requesting an important proportion of its pages but without spending
|
|
# much time on them.
|
|
# - Browsing: going wherever the data leads the navigator until catching an
|
|
# interest.
|
|
# - Exploring: reading the viewed pages thoroughly.
|
|
# - Searching: seeking for a particular document or information.
|
|
# - Wandering: navigating in an unstructured fashion without any particular
|
|
# goal or strategy.
|
|
|
|
# Features for navigation types for MTT:
|
|
# - Scanning / Overviewing:
|
|
# * Proportion of artworks looked at is high: datcase$nitems / 70
|
|
# * Duration per artwork is low: "ave_duration_item" / datcase$duration
|
|
# - Exploring:
|
|
# * Looking at additional information for most items touched (high value):
|
|
# harmonic mean of datcase$nopenTopic / datcase$nflipCard and
|
|
# datcase$nopenPopup / datcase$nopenTopic
|
|
# - Searching / Studying:
|
|
# * Looking only at a few items
|
|
# datcase$nitems / 70 is low
|
|
# * Opening few cards
|
|
# datcase$nflipCard / mean(datcase$nflipCard) or median(datcase$nflipCard) is low
|
|
# * but for most cards popups are opened:
|
|
# datcase$nopenPopup / datcase$nflipCard is high
|
|
# - Wandering / Flitting:
|
|
# * Items are mostly just moved:
|
|
# datcase$nmove / datcase$length is high
|
|
# * Duration per case is low:
|
|
# datcase$duration / mean(datcase$duration) or median(datcase$duration)
|
|
# * Duration per artwork is low: "ave_duration_item" / datcase$duration
|
|
|
|
dattree <- data.frame(case = datcase$case,
|
|
#Duration = datcase$duration,
|
|
PropItems = datcase$nitems / length(unique(dat$item)),
|
|
#PropTopic = datcase$nopenTopic / datcase$nflipCard,
|
|
#PropPopup = datcase$nopenPopup / datcase$nopenTopic,
|
|
# SearchInfo =
|
|
# 2*(((datcase$nopenPopup / datcase$nopenTopic) *
|
|
# (datcase$nopenTopic / datcase$nflipCard)) /
|
|
# ((datcase$nopenPopup / datcase$nopenTopic) +
|
|
# (datcase$nopenTopic / datcase$nflipCard))
|
|
# ),
|
|
SearchInfo = datcase$nopenTopic / datcase$nflipCard +
|
|
datcase$nopenPopup / datcase$nopenTopic,
|
|
PropMoves = datcase$nmove / datcase$length,
|
|
PathLinearity = datcase$nitems / datcase$npaths,
|
|
Singularity = datcase$npaths / datcase$length
|
|
)
|
|
|
|
#dattree$SearchInfo <- ifelse(dattree$SearchInfo %in% 0, 0.1, dattree$SearchInfo)
|
|
dattree$SearchInfo <- ifelse(is.na(dattree$SearchInfo), 0, dattree$SearchInfo)
|
|
|
|
#dattree$PropTopic <- ifelse(is.na(dattree$PropTopic), 0, dattree$PropTopic)
|
|
#dattree$PropPopup <- ifelse(is.na(dattree$PropPopup), 0, dattree$PropPopup)
|
|
|
|
get_centrality <- function(case, data) {
  # Compute three centrality measures for the process map of one case:
  # degree centralization without loops, degree centralization with
  # loops, and betweenness centralization (in that order).
  #
  # @param case a single case id, matched against data$case
  # @param data the full event data frame (needs case, item, path,
  #             date.start, date.stop columns)
  # @return numeric vector of length 3

  # activitylog() expects "start"/"complete" timestamp columns
  data$start <- data$date.start
  data$complete <- data$date.stop

  case_log <- activitylog(data[data$case == case, ],
                          case_id = "case",
                          activity_id = "item",
                          resource_id = "path",
                          timestamps = c("start", "complete"))

  # Build the directly-follows graph and convert it to igraph
  graph <- DiagrammeR::to_igraph(process_map(case_log, render = FALSE))

  c(igraph::centr_degree(graph, loops = FALSE)$centralization,
    igraph::centr_degree(graph, loops = TRUE)$centralization,
    igraph::centr_betw(graph)$centralization)
}
|
|
|
|
|
|
centrality <- lapply(dattree$case, get_centrality, data = dat)
|
|
centrality <- do.call(rbind, centrality)
|
|
|
|
save(centrality, file = "results/haum/tmp_centrality.RData")
|
|
|
|
#dattree$centr_degree <- centrality[, 1]
|
|
#dattree$centr_degree_loops <- centrality[, 2]
|
|
dattree$DegreeCentrality <- centrality[, 2]
|
|
#dattree$BetweenCentrality <- centrality[, 3]
|
|
|
|
## Add average duration per item
|
|
|
|
dat_split <- split(dat[, c("item", "case", "path", "timeMs.start", "timeMs.stop")], ~ path)
|
|
|
|
dat_list <- pbapply::pblapply(dat_split, time_minmax)
|
|
dat_minmax <- dplyr::bind_rows(dat_list)
|
|
|
|
tmp <- aggregate(min_time ~ path, dat_minmax, unique)
|
|
tmp$max_time <- aggregate(max_time ~ path, dat_minmax, unique, na.action = NULL)$max_time
|
|
tmp$duration <- tmp$max_time - tmp$min_time
|
|
tmp$case <- aggregate(case ~ path, dat_minmax, unique)$case
|
|
|
|
dattree$AvDurItem <- aggregate(duration ~ case, tmp, mean)$duration
|
|
|
|
rm(tmp)
|
|
|
|
plot(dattree[, -1], pch = ".")
|
|
|
|
# Histograms of the features actually present in dattree.
# Fix: the original called hist() on columns that are commented out in the
# dattree construction above (Duration, PropTopic, PropPopup, centr_degree,
# centr_degree_loops, centr_between); hist(NULL) errors immediately.
par(mfrow = c(3, 3))
hist(dattree$AvDurItem, breaks = 50, main = "")
hist(dattree$PropItems, breaks = 50, main = "")
hist(dattree$SearchInfo, breaks = 50, main = "")
hist(dattree$PropMoves, breaks = 50, main = "")
hist(dattree$PathLinearity, breaks = 50, main = "")
hist(dattree$Singularity, breaks = 50, main = "")
hist(dattree$DegreeCentrality, breaks = 50, main = "")
|
|
|
|
|
|
cor_mat <- cor(dattree[, -1], use = "pairwise")
|
|
diag(cor_mat) <- NA
|
|
heatmap(cor_mat)
|
|
|
|
|
|
# dattree$Pattern <- "Dispersion"
|
|
# dattree$Pattern <- ifelse(dattree$PathLinearity > 0.8 &
|
|
# dattree$Singularity > 0.8, "Scholar",
|
|
# dattree$Pattern)
|
|
# dattree$Pattern <- ifelse(dattree$PathLinearity <= 0.8 &
|
|
# dattree$centr_between > 0.5, "Star",
|
|
# dattree$Pattern)
|
|
|
|
write.table(dattree,
|
|
file = "results/haum/dattree.csv",
|
|
sep = ";",
|
|
quote = FALSE,
|
|
row.names = FALSE)
|
|
|
|
#--------------- (2) Clustering ---------------
|
|
|
|
df <- dattree[, c("AvDurItem", "PropItems", "PropTopic", "PropPopup", "PropMoves")]
|
|
#df <- dattree[, c("AvDurItem", "PropItems", "SearchInfo", "PropMoves")]
|
|
df$Scholar <- ifelse(dattree$Pattern == "Scholar", 1, 0)
|
|
df$Star <- ifelse(dattree$Pattern == "Star", 1, 0)
|
|
df$Dispersion <- ifelse(dattree$Pattern == "Dispersion", 1, 0)
|
|
|
|
# scale Duration and min/max SearchInfo
|
|
#df$AvDurItem <- as.numeric(scale(df$AvDurItem))
|
|
df$AvDurItem <- (df$AvDurItem - min(df$AvDurItem, na.rm = TRUE)) /
|
|
(max(df$AvDurItem, na.rm = TRUE) - min(df$AvDurItem, na.rm = TRUE))
|
|
#df$SearchInfo <- (df$SearchInfo - min(df$SearchInfo)) /
|
|
# (max(df$SearchInfo) - min(df$SearchInfo))
|
|
df$PropTopic <- (df$PropTopic - min(df$PropTopic, na.rm = TRUE)) /
|
|
(max(df$PropTopic, na.rm = TRUE) - min(df$PropTopic, na.rm = TRUE))
|
|
df$PropPopup <- (df$PropPopup - min(df$PropPopup, na.rm = TRUE)) /
|
|
(max(df$PropPopup, na.rm = TRUE) - min(df$PropPopup, na.rm = TRUE))
|
|
|
|
|
|
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
|
df <- dattree[, -1]
|
|
df$AvDurItem <- normalize(df$AvDurItem)
|
|
df$SearchInfo <- normalize(df$SearchInfo)
|
|
df$InfocardOnly <- datcase$infocardOnly
|
|
summary(df)
|
|
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
|
|
|
# "Flatten" with PCA
|
|
pc <- prcomp(df)
|
|
coor_2d <- as.data.frame(pc$x[, c(1, 2)])
|
|
coor_3d <- as.data.frame(pc$x[, c(1, 2, 3)])
|
|
|
|
plot(coor_2d)
|
|
rgl::plot3d(coor_3d)
|
|
|
|
#--------------- (2.1) K-Means clustering ---------------
|
|
|
|
mycols <- c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")
|
|
|
|
k1 <- kmeans(df, 4)
|
|
|
|
grp_km <- k1$cluster
|
|
table(grp_km)
|
|
|
|
fviz_cluster(list(data = df, cluster = grp_km),
|
|
palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
plot(coor_2d, col = mycols[grp_km])
|
|
|
|
rgl::plot3d(coor_3d, col = mycols[grp_km])
|
|
|
|
aggregate(. ~ grp_km, df, mean)
|
|
|
|
#--------------- (2.2) Hierarchical clustering ---------------
|
|
|
|
mat <- dist(df)
|
|
|
|
h1 <- hclust(mat, method = "average")
|
|
h2 <- hclust(mat, method = "complete")
|
|
h3 <- hclust(mat, method = "ward.D")
|
|
h4 <- hclust(mat, method = "ward.D2")
|
|
h5 <- hclust(mat, method = "single")
|
|
|
|
# Cophenetic Distances, for each linkage (runs quite some time!)
|
|
c1 <- cophenetic(h1)
|
|
c2 <- cophenetic(h2)
|
|
c3 <- cophenetic(h3)
|
|
c4 <- cophenetic(h4)
|
|
c5 <- cophenetic(h5)
|
|
|
|
# Correlations
|
|
cor(mat, c1)
|
|
cor(mat, c2)
|
|
cor(mat, c3)
|
|
cor(mat, c4)
|
|
cor(mat, c5)
|
|
|
|
# https://en.wikipedia.org/wiki/Cophenetic_correlation
|
|
# https://stats.stackexchange.com/questions/195446/choosing-the-right-linkage-method-for-hierarchical-clustering
|
|
|
|
hc <- h4
|
|
|
|
# Something like a scree plot (??)
|
|
plot(rev(hc$height)[1:100], type = "b", pch = 16, cex = .5)
|
|
|
|
k <- 4
|
|
|
|
grp_hclust <- cutree(hc, k = k)
|
|
|
|
table(grp_hclust)
|
|
|
|
fviz_cluster(list(data = df, cluster = grp_hclust),
|
|
palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
plot(coor_2d, col = mycols[grp_hclust])
|
|
rgl::plot3d(coor_3d, col = mycols[grp_hclust])
|
|
|
|
table(dattree[grp_hclust == 1, "Pattern"])
|
|
table(dattree[grp_hclust == 2, "Pattern"])
|
|
table(dattree[grp_hclust == 3, "Pattern"])
|
|
table(dattree[grp_hclust == 4, "Pattern"])
|
|
|
|
|
|
aggregate(. ~ grp_hclust, df, mean)
|
|
|
|
|
|
aggregate(cbind(duration, distance, scaleSize, rotationDegree, length,
|
|
nmove, nflipCard, nopenTopic, nopenPopup) ~ grp_hclust, datcase,
|
|
mean)
|
|
|
|
#--------------- (2.3) DBSCAN clustering ---------------
|
|
|
|
library(dbscan)
|
|
d1 <- dbscan(df, eps = .3, minPts = ncol(df) + 1)
|
|
hullplot(df, d1)
|
|
|
|
grp_db <- d1$cluster
|
|
table(grp_db)
|
|
|
|
kNNdistplot(df, k = ncol(df))
|
|
abline(h = 0.2, col = "red")
|
|
abline(h = 0.06, col = "red")
|
|
|
|
fviz_cluster(list(data = df[grp_db != 0, ], cluster = grp_db[grp_db != 0]),
|
|
#palette = mycols,
|
|
ellipse.type = "convex",
|
|
show.clust.cent = FALSE,
|
|
ggtheme = theme_bw())
|
|
|
|
mycols <- c("black", mycols)
|
|
|
|
plot(coor_2d, col = mycols[grp_db + 1])
|
|
legend("topleft", paste("Cl", 0:4), col = mycols, pch = 21)
|
|
rgl::plot3d(coor_3d, col = mycols[grp_db + 1])
|
|
|
|
aggregate(. ~ grp_db, df, mean)
|
|
|
|
table(dattree[grp_db == 0, "Pattern"])
|
|
table(dattree[grp_db == 1, "Pattern"])
|
|
table(dattree[grp_db == 2, "Pattern"])
|
|
table(dattree[grp_db == 3, "Pattern"])
|
|
|
|
### Look at selected cases ###########################################
|
|
tmp <- dat
|
|
tmp$start <- tmp$date.start
|
|
tmp$complete <- tmp$date.stop
|
|
|
|
alog <- activitylog(tmp[tmp$case == 30418, ],
|
|
case_id = "case",
|
|
activity_id = "item",
|
|
resource_id = "path",
|
|
timestamps = c("start", "complete"))
|
|
|
|
process_map(alog)
|
|
|
|
rm(tmp)
|
|
|
|
######################################################################
|
|
|
|
res <- merge(dat, data.frame(case = dattree$case, grp_km, grp_hclust, grp_db),
|
|
by = "case", all.x = TRUE)
|
|
res <- res[order(res$fileId.start, res$date.start, res$timeMs.start), ]
|
|
|
|
xtabs( ~ item + grp_db, res)
|
|
aggregate(event ~ grp_db, res, table)
|
|
|
|
# Look at clusters
|
|
par(mfrow = c(2, 2))
|
|
vioplot::vioplot(duration ~ grp_db, res)
|
|
vioplot::vioplot(distance ~ grp_db, res)
|
|
vioplot::vioplot(scaleSize ~ grp_db, res)
|
|
vioplot::vioplot(rotationDegree ~ grp_db, res)
|
|
|
|
aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp_db, res, mean)
|
|
aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp_db, res, median)
|
|
|
|
write.table(res,
|
|
file = "results/haum/event_logfiles_pre-corona_with-clusters_cases.csv",
|
|
sep = ";",
|
|
quote = FALSE,
|
|
row.names = FALSE)
|
|
|
|
save(res, mat, h1, h2, h3, h4, h5, c1, c2, c3, c4, c5, datcase, dattree, df,
|
|
file = "results/haum/tmp_user-navigation.RData")
|
|
|
|
#--------------- (3) Fit tree ---------------
|
|
|
|
library(rpart)
|
|
library(partykit)
|
|
|
|
dattree_db <- dattree[grp_db != 0, -1]
|
|
dattree_db$grp <- factor(grp_db[grp_db != 0])
|
|
dattree_db$Pattern <- factor(dattree_db$Pattern)
|
|
|
|
c1 <- rpart(grp ~ ., data = dattree_db, method = "class")
|
|
plot(as.party(c1))
|
|
|
|
c2 <- rpart(as.factor(grp_db) ~ ., data = dattree[, -1], method = "class")
|
|
plot(as.party(c2))
|
|
|
|
|
|
|
|
c1 <- rpart(grp ~ AvDurItem + PropItems + PropTopic + PropPopup + PropMoves +
|
|
Pattern, data = dattree_db, method = "class")
|
|
plot(as.party(c1))
|
|
|
|
# with conditional tree
|
|
c2 <- ctree(grp ~ ., data = dattree_db, alpha = 0.5)
|
|
plot(c2)
|
|
|
|
c3 <- ctree(grp ~ AvDurItem + PropItems + PropTopic + PropPopup +
|
|
PropMoves + Pattern, data = dattree_db, alpha = 0)
|
|
plot(c3)
|
|
|
|
c4 <- ctree(grp ~ AvDurItem + PropItems + PropTopic + PropPopup +
|
|
PropMoves + Pattern, data = dattree_db, alpha = 1)
|
|
plot(c4)
|
|
|
|
# with excluded points
|
|
c5 <- ctree(factor(grp_db) ~ ., data = dattree[, -1], alpha = 0.05)
|
|
plot(c5)
|
|
|
|
# with excluded points
|
|
c6 <- ctree(factor(grp_db) ~ ., data = df, alpha = 1)
|
|
plot(c6)
|
|
# --> just checking
|
|
|
|
#--------------- (4) Investigate variants ---------------
|
|
|
|
res$start <- res$date.start
|
|
res$complete <- res$date.stop
|
|
|
|
alog <- activitylog(res,
|
|
case_id = "case",
|
|
activity_id = "item",
|
|
resource_id = "path",
|
|
timestamps = c("start", "complete"))
|
|
|
|
trace_explorer(alog, n_traces = 25)
|
|
# --> sequences of artworks are just too rare
|
|
|
|
tr <- traces(alog)
|
|
trace_length <- sapply(strsplit(tr$trace, ","), length)
|
|
tr[trace_length > 10, ]
|
|
|
|
trace_varied <- sapply(strsplit(tr$trace, ","), function(x) length(unique(x)))
|
|
tr[trace_varied > 1, ]
|
|
table(tr[trace_varied > 2, "absolute_frequency"])
|
|
table(tr[trace_varied > 3, "absolute_frequency"])
|
|
|
|
summary(tr$absolute_frequency)
|
|
vioplot::vioplot(tr$absolute_frequency)
|
|
|
|
# Power law for frequencies of traces
|
|
tab <- table(tr$absolute_frequency)
|
|
x <- as.numeric(tab)
|
|
y <- as.numeric(names(tab))
|
|
|
|
plot(x, y, log = "xy")
|
|
p1 <- lm(log(y) ~ log(x))
|
|
pre <- exp(coef(p1)[1]) * x^coef(p1)[2]
|
|
lines(x, pre)
|
|
|
|
|
|
# Look at individual traces as examples
|
|
tr[trace_varied == 5 & trace_length > 50, ]
|
|
# --> every variant exists only once, of course
|
|
datcase[datcase$nitems == 5 & datcase$length > 50,]
|
|
|
|
sapply(datcase[, -c(1, 9)], median)
|
|
|
|
#ex <- datcase[datcase$nitems == 4 & datcase$length == 15,]
|
|
ex <- datcase[datcase$nitems == 5,]
|
|
ex <- ex[sample(1:nrow(ex), 20), ]
|
|
# --> pretty randomly chosen... TODO:
|
|
|
|
case_ids <- NULL
|
|
|
|
for (case in ex$case) {
|
|
if ("080" %in% res$item[res$case == case] | "503" %in% res$item[res$case == case]) {
|
|
case_ids <- c(case_ids, TRUE)
|
|
} else {
|
|
case_ids <- c(case_ids, FALSE)
|
|
}
|
|
}
|
|
|
|
cases <- ex$case[case_ids]
|
|
|
|
|
|
for (case in cases) {
|
|
|
|
alog <- activitylog(res[res$case == case, ],
|
|
case_id = "case",
|
|
activity_id = "item",
|
|
resource_id = "path",
|
|
timestamps = c("start", "complete"))
|
|
|
|
dfg <- process_map(alog,
|
|
type_nodes = frequency("absolute", color_scale = "Greys"),
|
|
type_edges = frequency("absolute", color_edges = "#FF6900"),
|
|
rankdir = "LR",
|
|
render = FALSE)
|
|
export_map(dfg,
|
|
file_name = paste0("results/processmaps/dfg_example_cases_", case, "_R.pdf"),
|
|
file_type = "pdf",
|
|
title = paste("Case", case))
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
########################### TODO: Still need it?
|
|
|
|
|
|
net <- process_map(alog, render = FALSE)
|
|
#DiagrammeR::get_node_df(net)
|
|
|
|
DiagrammeR::get_node_info(net)
|
|
|
|
DiagrammeR::get_degree_distribution(net)
|
|
|
|
DiagrammeR::get_degree_in(net)
|
|
DiagrammeR::get_degree_out(net)
|
|
DiagrammeR::get_degree_total(net)
|
|
|
|
|
|
N <- DiagrammeR::count_nodes(net) - 2 # Do not count start and stop nodes
|
|
|
|
dc <- DiagrammeR::get_degree_total(net)[1:N, "total_degree"] / (N - 1)
|
|
|
|
inet <- DiagrammeR::to_igraph(net)
|
|
igraph::centr_degree(inet, loops = FALSE)
|
|
igraph::centr_betw(inet)
|
|
igraph::centr_clo(inet)
|
|
|
|
|
|
|