# 09_user-navigation.R
#
# content: (1) Read data
#          (1.1) Read log event data
#          (1.2) Extract additional infos for clustering
#          (2) Clustering
#          (3) Fit tree
#          (4) Investigate variants
#
# input:  results/haum/event_logfiles_2024-02-21_16-07-33.csv
# output: results/haum/event_logfiles_pre-corona_with-clusters_cases.csv
#         results/haum/dattree.csv
#
# last mod: 2024-02-27
# setwd("C:/Users/nwickelmaier/Nextcloud/Documents/MDS/2023ss/60100_master_thesis/analysis/code")
library(bupaverse)
library(factoextra)
#--------------- (1) Read data ---------------
#--------------- (1.1) Read log event data ---------------
dat0 <- read.table("results/haum/event_logfiles_2024-02-21_16-07-33.csv",
colClasses = c("character", "character", "POSIXct",
"POSIXct", "character", "integer",
"numeric", "character", "character",
rep("numeric", 3), "character",
"character", rep("numeric", 11),
"character", "character"),
sep = ";", header = TRUE)
dat0$event <- factor(dat0$event, levels = c("move", "flipCard", "openTopic",
"openPopup"))
dat0$topic <- factor(dat0$topic)
dat0$weekdays <- factor(weekdays(dat0$date.start),
levels = c("Montag", "Dienstag", "Mittwoch",
"Donnerstag", "Freitag", "Samstag",
"Sonntag"),
labels = c("Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday",
"Sunday"))
# Select data pre Corona
dat <- dat0[as.Date(dat0$date.start) < "2020-03-13", ]
dat <- dat[dat$path != 106098, ]
rm(dat0)
#--------------- (1.2) Extract additional infos for clustering ---------------
datcase <- aggregate(cbind(distance, scaleSize, rotationDegree) ~
case, dat, function(x) mean(x, na.rm = TRUE), na.action = NULL)
datcase$length <- aggregate(item ~ case, dat, length)$item
# Tabulate events and topics per case once, then spread the counts into columns
evt <- aggregate(event ~ case, dat, table)
eventtab <- data.frame(case = evt$case,
                       nmove = evt$event[, "move"],
                       nflipCard = evt$event[, "flipCard"],
                       nopenTopic = evt$event[, "openTopic"],
                       nopenPopup = evt$event[, "openPopup"])

top <- aggregate(topic ~ case, dat, table)
topictab <- data.frame(case = top$case,
                       artist = top$topic[, 1],
                       details = top$topic[, 2],
                       extra_info = top$topic[, 3],
                       komposition = top$topic[, 4],
                       leben_des_kunstwerks = top$topic[, 5],
                       licht_und_farbe = top$topic[, 6],
                       technik = top$topic[, 7],
                       thema = top$topic[, 8])
datcase <- datcase |>
merge(eventtab, by = "case", all = TRUE) |>
merge(topictab, by = "case", all = TRUE)
datcase$ntopiccards <- aggregate(topic ~ case, dat,
                                 function(x) ifelse(all(is.na(x)), NA,
                                                    length(na.omit(x))),
                                 na.action = NULL)$topic
datcase$ntopics <- aggregate(topic ~ case, dat,
                             function(x) ifelse(all(is.na(x)), NA,
                                                length(unique(na.omit(x)))),
                             na.action = NULL)$topic
datcase$nitems <- aggregate(item ~ case, dat,
                            function(x) length(unique(x)),
                            na.action = NULL)$item
datcase$npaths <- aggregate(path ~ case, dat,
                            function(x) length(unique(x)),
                            na.action = NULL)$path
datcase$vacation <- aggregate(vacation ~ case, dat,
                              function(x) ifelse(all(is.na(x)), 0, 1),
                              na.action = NULL)$vacation
datcase$holiday <- aggregate(holiday ~ case, dat,
                             function(x) ifelse(all(is.na(x)), 0, 1),
                             na.action = NULL)$holiday
datcase$weekend <- aggregate(weekdays ~ case, dat,
                             function(x) ifelse(any(x %in% c("Saturday", "Sunday")), 1, 0),
                             na.action = NULL)$weekdays
datcase$morning <- aggregate(date.start ~ case, dat,
                             function(x) ifelse(lubridate::hour(x[1]) > 13, 0, 1),
                             na.action = NULL)$date.start
dat_split <- split(dat, ~ case)
# First and last touch time (in ms) within a subset (case or path)
time_minmax <- function(subdata) {
  subdata$min_time <- min(subdata$timeMs.start)
  if (all(is.na(subdata$timeMs.stop))) {
    subdata$max_time <- NA
  } else {
    subdata$max_time <- max(subdata$timeMs.stop, na.rm = TRUE)
  }
  subdata
}
# TODO: Export from package mtt
dat_list <- pbapply::pblapply(dat_split, time_minmax)
dat_minmax <- dplyr::bind_rows(dat_list)
datcase$min_time <- aggregate(min_time ~ case, dat_minmax, unique)$min_time
datcase$max_time <- aggregate(max_time ~ case, dat_minmax, unique)$max_time
datcase$duration <- datcase$max_time - datcase$min_time
datcase$min_time <- NULL
datcase$max_time <- NULL
cor_mat <- cor(datcase[, -1], use = "pairwise")
diag(cor_mat) <- NA
heatmap(cor_mat)
# TODO: Add info if all items of a case are information cards??
# Navigation types by Bousbia et al. (2010):
# - Overviewing: close to Canter's "scanning". The learner covers a large
#   proportion of course pages in a phase of fast reading, seeking an
#   overall view of the course.
# - Flitting: close to "wandering". Reflects browsing without a strategy
#   or particular goal; differs from overviewing in the lack of focus on
#   the course.
# - Studying: a partial or complete reading of the course pages, where the
#   learner spends time on each page.
# - Deepening: the learner spends a relatively long time on a course,
#   checking details and seeking Web documents related to the course
#   topics. Differs from studying in the Web search the learner uses to
#   obtain a deeper understanding of the course.

# Taxonomy defined by Canter et al. (1985):
# - Scanning: seeking an overview of a theme (i.e., a subpart of the
#   hypermedia) by requesting a large proportion of its pages without
#   spending much time on them.
# - Browsing: going wherever the data leads the navigator until catching
#   an interest.
# - Exploring: reading the viewed pages thoroughly.
# - Searching: seeking a particular document or piece of information.
# - Wandering: navigating in an unstructured fashion without any
#   particular goal or strategy.
# Features for navigation types for MTT:
# - Scanning / Overviewing:
# * Proportion of artworks looked at is high: datcase$nitems / 70
# * Duration per artwork is low: "ave_duration_item" / datcase$duration
# - Exploring:
# * Looking at additional information for most items touched (high value):
# harmonic mean of datcase$nopenTopic / datcase$nflipCard and
# datcase$nopenPopup / datcase$nopenTopic
# - Searching / Studying:
# * Looking only at a few items
# datcase$nitems / 70 is low
# * Opening few cards
# datcase$nflipCard / mean(datcase$nflipCard) or median(datcase$nflipCard) is low
# * but for most cards popups are opened:
# datcase$nopenPopup / datcase$nflipCard is high
# - Wandering / Flitting:
# * Items are mostly just moved:
# datcase$nmove / datcase$length is high
# * Duration per case is low:
# datcase$duration / mean(datcase$duration) or median(datcase$duration)
# * Duration per artwork is low: "ave_duration_item" / datcase$duration
dattree <- data.frame(case = datcase$case,
Duration = datcase$duration,
PropItems = datcase$nitems / length(unique(dat$item)),
PropTopic = datcase$nopenTopic / datcase$nflipCard,
PropPopup = datcase$nopenPopup / datcase$nopenTopic,
# SearchInfo =
# 2*(((datcase$nopenPopup / datcase$nopenTopic) *
# (datcase$nopenTopic / datcase$nflipCard)) /
# ((datcase$nopenPopup / datcase$nopenTopic) +
# (datcase$nopenTopic / datcase$nflipCard))
# ),
PropMoves = datcase$nmove / datcase$length,
PathLinearity = datcase$nitems / datcase$npaths,
Singularity = datcase$npaths / datcase$length
)
#dattree$SearchInfo <- ifelse(dattree$SearchInfo %in% 0, 0.1, dattree$SearchInfo)
#dattree$SearchInfo <- ifelse(is.na(dattree$SearchInfo), 0, dattree$SearchInfo)
dattree$PropTopic <- ifelse(is.na(dattree$PropTopic), 0, dattree$PropTopic)
dattree$PropPopup <- ifelse(is.na(dattree$PropPopup), 0, dattree$PropPopup)
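# The commented-out SearchInfo above combines the two proportions via
# their harmonic mean; a minimal sketch of a helper for it (assumption:
# cases where both proportions are 0 should get a score of 0):
harmonic_mean <- function(a, b) ifelse(a + b == 0, 0, 2 * a * b / (a + b))
#dattree$SearchInfo <- harmonic_mean(dattree$PropTopic, dattree$PropPopup)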
# Build a per-case process map and return its degree centralization
# (without and with loops) and betweenness centralization
get_centrality <- function(case, data) {
  data$start <- data$date.start
  data$complete <- data$date.stop
  alog <- activitylog(data[data$case == case, ],
                      case_id = "case",
                      activity_id = "item",
                      resource_id = "path",
                      timestamps = c("start", "complete"))
  net <- process_map(alog, render = FALSE)
  inet <- DiagrammeR::to_igraph(net)
  c(igraph::centr_degree(inet, loops = FALSE)$centralization,
    igraph::centr_degree(inet, loops = TRUE)$centralization,
    igraph::centr_betw(inet)$centralization)
}
# Loops over all cases and takes a while; intermediate result is saved
centrality <- lapply(dattree$case, get_centrality, data = dat)
centrality <- do.call(rbind, centrality)
save(centrality, file = "results/haum/tmp_centrality.RData")
dattree$centr_degree <- centrality[, 1]
dattree$centr_degree_loops <- centrality[, 2]
dattree$centr_between <- centrality[, 3]
## Add average duration per item
dat_split <- split(dat[, c("item", "case", "path", "timeMs.start", "timeMs.stop")], ~ path)
dat_list <- pbapply::pblapply(dat_split, time_minmax)
dat_minmax <- dplyr::bind_rows(dat_list)
tmp <- aggregate(min_time ~ path, dat_minmax, unique)
tmp$max_time <- aggregate(max_time ~ path, dat_minmax, unique, na.action = NULL)$max_time
tmp$duration <- tmp$max_time - tmp$min_time
tmp$case <- aggregate(case ~ path, dat_minmax, unique)$case
dattree$AvDurItem <- aggregate(duration ~ case, tmp, mean)$duration
rm(tmp)
# Overview of all features (SearchInfo was replaced by PropTopic and
# PropPopup above)
par(mfrow = c(3, 4))
hist(dattree$Duration, breaks = 50, main = "")
hist(dattree$AvDurItem, breaks = 50, main = "")
hist(dattree$PropItems, breaks = 50, main = "")
hist(dattree$PropTopic, breaks = 50, main = "")
hist(dattree$PropPopup, breaks = 50, main = "")
hist(dattree$PropMoves, breaks = 50, main = "")
hist(dattree$PathLinearity, breaks = 50, main = "")
hist(dattree$Singularity, breaks = 50, main = "")
hist(dattree$centr_degree, breaks = 50, main = "")
hist(dattree$centr_degree_loops, breaks = 50, main = "")
hist(dattree$centr_between, breaks = 50, main = "")
cor_mat <- cor(dattree[, -1], use = "pairwise")
diag(cor_mat) <- NA
heatmap(cor_mat)
dattree$Pattern <- "Dispersion"
dattree$Pattern <- ifelse(dattree$PathLinearity > 0.8 &
dattree$Singularity > 0.8, "Scholar",
dattree$Pattern)
dattree$Pattern <- ifelse(dattree$PathLinearity <= 0.8 &
dattree$centr_between > 0.5, "Star",
dattree$Pattern)
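# Quick check on how the heuristic rules distribute the cases
table(dattree$Pattern)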
write.table(dattree,
file = "results/haum/dattree.csv",
sep = ";",
quote = FALSE,
row.names = FALSE)
#--------------- (2) Clustering ---------------
df <- dattree[, c("AvDurItem", "PropItems", "PropTopic", "PropPopup", "PropMoves")]
#df <- dattree[, c("AvDurItem", "PropItems", "SearchInfo", "PropMoves")]
# TODO: With or without duration? Why is it relevant?
df$Scholar <- ifelse(dattree$Pattern == "Scholar", 1, 0)
df$Star <- ifelse(dattree$Pattern == "Star", 1, 0)
df$Dispersion <- ifelse(dattree$Pattern == "Dispersion", 1, 0)
# Scale AvDurItem and min-max normalize the proportion features
df$AvDurItem <- as.numeric(scale(df$AvDurItem))

minmax <- function(x) (x - min(x, na.rm = TRUE)) /
  (max(x, na.rm = TRUE) - min(x, na.rm = TRUE))

#df$SearchInfo <- minmax(df$SearchInfo)
df$PropTopic <- minmax(df$PropTopic)
df$PropPopup <- minmax(df$PropPopup)
mat <- dist(df)
# TODO: Do I need to scale all variables?
h1 <- hclust(mat, method = "average")
h2 <- hclust(mat, method = "complete")
h3 <- hclust(mat, method = "ward.D")
h4 <- hclust(mat, method = "ward.D2")
h5 <- hclust(mat, method = "single")
# Cophenetic distances for each linkage (takes quite some time!)
c1 <- cophenetic(h1)
c2 <- cophenetic(h2)
c3 <- cophenetic(h3)
c4 <- cophenetic(h4)
c5 <- cophenetic(h5)
# Correlations
cor(mat, c1)
cor(mat, c2)
cor(mat, c3)
cor(mat, c4)
cor(mat, c5)
# https://en.wikipedia.org/wiki/Cophenetic_correlation
# https://stats.stackexchange.com/questions/195446/choosing-the-right-linkage-method-for-hierarchical-clustering
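# The same comparison in one pass, reusing the objects from above
sapply(list(average = c1, complete = c2, ward.D = c3, ward.D2 = c4,
            single = c5), function(cc) cor(mat, cc))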
hc <- h4
# Scree-type plot of merge heights to choose the number of clusters
plot(rev(hc$height)[1:100], type = "b", pch = 16, cex = .5)
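# Cross-check the choice of k with average silhouette widths (sketch;
# hcut is factoextra's hclust wrapper and may take a while here)
fviz_nbclust(df, FUNcluster = hcut, method = "silhouette", k.max = 8)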
k <- 4
grp_hclust <- cutree(hc, k = k)
table(grp_hclust)
fviz_cluster(list(data = df, cluster = grp_hclust),
palette = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E", "black"),
ellipse.type = "convex",
show.clust.cent = FALSE,
ggtheme = theme_bw())
table(dattree[grp_hclust == 1, "Pattern"])
table(dattree[grp_hclust == 2, "Pattern"])
table(dattree[grp_hclust == 3, "Pattern"])
table(dattree[grp_hclust == 4, "Pattern"])
# Look at 3d plot to see if clusters are actually separate
pc <- prcomp(df)
coor <- as.data.frame(pc$x[, c(1, 2, 3)])
rgl::plot3d(coor, col = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")[grp_hclust])
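# Variance captured by the first three components, to judge how
# representative the 3d view is
summary(pc)$importance[, 1:3]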
aggregate(cbind(Duration, PropItems, PropTopic, PropPopup, PropMoves,
                PathLinearity, Singularity, centr_degree,
                centr_degree_loops, centr_between) ~ grp_hclust, dattree,
          mean)
aggregate(cbind(duration, distance, scaleSize, rotationDegree, length,
nmove, nflipCard, nopenTopic, nopenPopup) ~ grp_hclust, datcase,
mean)
### DBSCAN clustering
library(dbscan)
# Rule of thumb: minPts at least the number of features + 1; eps is
# chosen from the kNN distance plot below
d1 <- dbscan(df, eps = .5, minPts = 9)
hullplot(df, d1)
grp_db <- d1$cluster
table(grp_db)
# kNN distance plot to choose eps: look for the elbow
kNNdistplot(df, k = 6)
abline(h = 0.5, col = "red")
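# Sensitivity check (sketch): number of clusters and noise points for a
# few eps candidates around the elbow
sapply(c(.4, .5, .6, .7), function(e) {
  cl <- dbscan(df, eps = e, minPts = 9)$cluster
  c(eps = e, n_cluster = max(cl), n_noise = sum(cl == 0))
})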
fviz_cluster(list(data = df[grp_db != 0, ], cluster = grp_db[grp_db != 0]),
palette = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E"),
ellipse.type = "convex",
show.clust.cent = FALSE,
ggtheme = theme_bw())
rgl::plot3d(coor, col = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")[grp_db + 1])
aggregate(. ~ grp_db, df, mean)
table(dattree[grp_db == 0, "Pattern"])
table(dattree[grp_db == 1, "Pattern"])
table(dattree[grp_db == 2, "Pattern"])
table(dattree[grp_db == 3, "Pattern"])
### K-Means clustering

# k-means starts are random; fix the seed (arbitrary value) and use
# several restarts for a stable solution
set.seed(1607)
k1 <- kmeans(df, centers = 4, nstart = 25)
grp_km <- k1$cluster
table(grp_km)
fviz_cluster(list(data = df, cluster = grp_km),
palette = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E"),
ellipse.type = "convex",
show.clust.cent = FALSE,
ggtheme = theme_bw())
rgl::plot3d(coor, col = c("#78004B", "#FF6900", "#3CB4DC", "#91C86E")[grp_km])
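# Agreement between the cluster solutions (sketch; assumes the mclust
# package is installed, it is not loaded above)
mclust::adjustedRandIndex(grp_hclust, grp_km)
mclust::adjustedRandIndex(grp_hclust[grp_db != 0], grp_db[grp_db != 0])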
### Look at selected cases ###########################################
tmp <- dat  # res is only created further down; dat is still available here
tmp$start <- tmp$date.start
tmp$complete <- tmp$date.stop
alog <- activitylog(tmp[tmp$case == 30855, ],
case_id = "case",
activity_id = "item",
resource_id = "path",
timestamps = c("start", "complete"))
process_map(alog)
######################################################################
# Assumption: export the DBSCAN solution as "grp" (swap in grp_hclust or
# grp_km if preferred)
dattree$grp <- grp_db
res <- merge(dat, dattree[, c("case", "grp")], by = "case", all.x = TRUE)
res <- res[order(res$fileId.start, res$date.start, res$timeMs.start), ]
rm(dat)
xtabs( ~ item + grp, res)
aggregate(event ~ grp, res, table)
# Look at clusters
par(mfrow = c(2, 2))
vioplot::vioplot(duration ~ grp, res)
vioplot::vioplot(distance ~ grp, res)
vioplot::vioplot(scaleSize ~ grp, res)
vioplot::vioplot(rotationDegree ~ grp, res)
aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp, res, mean)
aggregate(cbind(duration, distance, scaleSize, rotationDegree) ~ grp, res, median)
write.table(res,
file = "results/haum/event_logfiles_pre-corona_with-clusters_cases.csv",
sep = ";",
quote = FALSE,
row.names = FALSE)
save(res, mat, h1, h2, h3, h4, h5, c1, c2, c3, c4, c5, datcase, dattree, df,
file = "results/haum/tmp_user-navigation.RData")
#--------------- (3) Fit tree ---------------
library(rpart)
library(partykit)
## dbscan
dattree_db <- dattree[grp_db != 0, ]
dattree_db$grp <- factor(grp_db[grp_db != 0])
dattree_db$Pattern <- factor(dattree_db$Pattern)
c1 <- rpart(grp ~ AvDurItem + PropItems + PropTopic + PropPopup +
              PropMoves + Pattern, data = dattree_db, method = "class")
plot(as.party(c1))

# Same model on the full data, including the DBSCAN noise cluster (0)
c1a <- rpart(grp_db ~ AvDurItem + PropItems + PropTopic + PropPopup +
               PropMoves + Pattern, data = dattree, method = "class")
plot(as.party(c1a))
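# Resubstitution check (sketch): how well the tree reproduces the
# cluster labels
table(predicted = predict(c1, type = "class"), observed = dattree_db$grp)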
c2 <- rpart(grp ~ PropItems + PropTopic + PropPopup + PropMoves + Pattern,
            data = dattree_db, method = "class")
plot(as.party(c2))
# with conditional tree function
c3 <- ctree(as.factor(grp_db) ~ AvDurItem + PropItems + PropTopic + PropPopup +
PropMoves + as.factor(Pattern), data = dattree, alpha = 1)
plot(c3)
cluster <- as.factor(grp_db[grp_db != 0])
c4 <- ctree(cluster ~ nmove + nflipCard + nopenTopic + nopenPopup,
data = datcase[grp_db != 0, ], alpha = .001)
plot(c4)
c5 <- ctree(cluster ~ duration,
data = datcase[grp_db != 0, ], alpha = .001)
plot(c5)
## hclust
c1 <- rpart(as.factor(grp_hclust) ~ AvDurItem + PropItems + PropTopic +
              PropPopup + PropMoves + Pattern, data = dattree,
            method = "class")
plot(as.party(c1))
c3 <- ctree(as.factor(grp_hclust) ~ AvDurItem + PropItems + PropTopic +
              PropPopup + PropMoves + as.factor(Pattern), data = dattree,
            alpha = 0)
plot(c3)
c4 <- ctree(as.factor(grp_hclust) ~ nmove + nflipCard + nopenTopic + nopenPopup,
data = datcase, alpha = .001)
plot(c4)
#--------------- (4) Investigate variants ---------------
res$start <- res$date.start
res$complete <- res$date.stop
alog <- activitylog(res,
case_id = "case",
activity_id = "item",
resource_id = "path",
timestamps = c("start", "complete"))
trace_explorer(alog, n_traces = 25)
# --> sequences of artworks are just too rare
tr <- traces(alog)
trace_length <- sapply(strsplit(tr$trace, ","), length)
tr[trace_length > 10, ]
trace_varied <- sapply(strsplit(tr$trace, ","), function(x) length(unique(x)))
tr[trace_varied > 1, ]
table(tr[trace_varied > 2, "absolute_frequency"])
table(tr[trace_varied > 3, "absolute_frequency"])
summary(tr$absolute_frequency)
vioplot::vioplot(tr$absolute_frequency)
# Power law for frequencies of traces
tab <- table(tr$absolute_frequency)
x <- as.numeric(tab)
y <- as.numeric(names(tab))
plot(x, y, log = "xy")
p1 <- lm(log(y) ~ log(x))
pre <- exp(coef(p1)[1]) * x^coef(p1)[2]
lines(x, pre)
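# Slope of the log-log fit estimates the power-law exponent; quick read-out
coef(p1)[2]
summary(p1)$r.squared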
# Look at individual traces as examples
tr[trace_varied == 5 & trace_length > 50, ]
# --> every variant exists only once, of course
datcase[datcase$nitems == 5 & datcase$length > 50,]
sapply(datcase[, -c(1, 9)], median)
#ex <- datcase[datcase$nitems == 4 & datcase$length == 15,]
ex <- datcase[datcase$nitems == 5,]
ex <- ex[sample(1:nrow(ex), 20), ]
# --> pretty randomly chosen... TODO:
# Flag example cases that contain item "080" or "503"
case_ids <- sapply(ex$case, function(case)
  any(c("080", "503") %in% res$item[res$case == case]))

cases <- ex$case[case_ids]
for (case in cases) {
alog <- activitylog(res[res$case == case, ],
case_id = "case",
activity_id = "item",
resource_id = "path",
timestamps = c("start", "complete"))
dfg <- process_map(alog,
type_nodes = frequency("absolute", color_scale = "Greys"),
type_edges = frequency("absolute", color_edges = "#FF6900"),
rankdir = "LR",
render = FALSE)
export_map(dfg,
file_name = paste0("results/processmaps/dfg_example_cases_", case, "_R.pdf"),
file_type = "pdf",
title = paste("Case", case))
}
########################### TODO: Still need it?
net <- process_map(alog, render = FALSE)
#DiagrammeR::get_node_df(net)
DiagrammeR::get_node_info(net)
DiagrammeR::get_degree_distribution(net)
DiagrammeR::get_degree_in(net)
DiagrammeR::get_degree_out(net)
DiagrammeR::get_degree_total(net)
N <- DiagrammeR::count_nodes(net) - 2 # Do not count start and stop nodes
dc <- DiagrammeR::get_degree_total(net)[1:N, "total_degree"] / (N - 1)
inet <- DiagrammeR::to_igraph(net)
igraph::centr_degree(inet, loops = FALSE)
igraph::centr_betw(inet)
igraph::centr_clo(inet)