library(keras)  # neuronove siete

# instalacia:
# install.packages("remotes")
# remotes::install_github(sprintf("rstudio/%s", c("reticulate", "tensorflow", "keras")))
# if (is.null(reticulate::virtualenv_starter()))
#   reticulate::install_python()
# keras3::install_keras()
#
# (stara) instalacia:
# install.packages("keras")
# library(keras)
# install_keras()
# ... potrebuje Python => pripadne teda Would you like to install Miniconda? [Y/n]: Y

#====================
# Neuronova siet ####

# Normalna NS pre MNIST
# jednoducha, nie specializovana na obrazky
# pre obrazky: convolutional neural network
#              vyzaduje x-ka ako 28x28 matice, nie 784x1 vektory

# Data
M <- read.table("MNIST.txt")
# One-hot encode the digit labels (column 1) for the network's softmax output
lab.all <- to_categorical(M[, 1], num_classes = 10)
head(lab.all)

# Pixel columns rescaled to [0, 1]; as.matrix drops the data.frame/array
# class that otherwise makes training throw an error
X.all <- as.matrix(M[, -1] / 255)
n <- nrow(X.all)

# Train/test split: 90% of the rows for training, the rest held out
ind.train <- sample(n, round(0.9 * n), replace = FALSE)
X.train <- X.all[ind.train, ]
y.train <- lab.all[ind.train, ]
X.test  <- X.all[-ind.train, ]
y.test  <- lab.all[-ind.train, ]

ncol(X.train)

# Build the network: three dropout-regularized dense ReLU layers feeding a
# 10-way softmax that outputs class probabilities.
# layer_dropout randomly zeroes a fraction of its inputs to curb overfitting.
model <- keras_model_sequential() %>%
  layer_dense(units = 256, activation = "relu", input_shape = c(784)) %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 128, activation = "relu") %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 64, activation = "relu") %>%
  layer_dropout(rate = 0.25) %>%
  layer_dense(units = 10, activation = "softmax")
summary(model)

# Training configuration:
#   loss      - the function being optimized
#   optimizer - the algorithm doing the optimizing
#   metrics   - extra quantities reported during training
model %>% compile(
  loss = "categorical_crossentropy",
  optimizer = "adam",
  metrics = c("accuracy")
)

# Train: 20 passes over the data in mini-batches of 128 rows
history <- model %>%
  fit(x = X.train,
      y = y.train,
      epochs = 20,
      batch_size = 128)

# Loss and accuracy on the held-out set
model %>% evaluate(
  X.test, y.test)

# predict() returns only the class probabilities (one row per test image)
y.pp <- model %>%
  predict(X.test)

# Predicted class = column with the highest probability.
# max.col(..., ties.method = "first") is the vectorized equivalent of
# apply(y.pp, 1, which.max); subtract 1 so classes run 0..9.
y.pred <- max.col(y.pp, ties.method = "first") - 1

# Manual accuracy check against the raw (non-one-hot) labels
cisla.test <- M[-ind.train, 1]
zle <- which(y.pred != cisla.test)           # indices of misclassified digits
(nrow(y.test) - length(zle)) / nrow(y.test)  # accuracy by hand

# Draw one misclassified digit
pal <- gray.colors(256, start=0, end=1, rev=TRUE)
ind <- zle[2]
#ind <- 5  # for a "normally" classified digit
obr <- matrix(as.numeric(X.test[ind, ]), nrow=28, ncol=28, byrow=FALSE)
plot(1:28, 1:28, type="n")
# Vectorized replacement of the nested i/j loops: one points() call draws
# all 784 pixels; j varies fastest to match the original drawing order,
# and 29-j flips the y axis so row 1 ends up at the top.
px <- expand.grid(j = 1:28, i = 1:28)
points(px$i, 29 - px$j, pch=15,
       col=pal[round(obr[cbind(px$i, px$j)]*255)+1])
print(paste("Skutocne:", cisla.test[ind]))
print(paste("Predikovane:", y.pred[ind]))
print("Pravdepodobnosti:")
rbind(0:9, round(y.pp[ind, ], 5))

#=================
# Autoencoder ####

# Food composition data (columns 9:14 are already divided by weight)
food <- read.table("01_food.txt", header=TRUE, sep=" ")
X <- food[, 9:14]
colnames(X) <- c("Fat", "Energy", "Carbohydrates", "Protein", "Cholesterol", "Saturated.fat")

# PCA baseline for later comparison with the autoencoder codes
food.pca <- prcomp(X, scale=TRUE)
plot(food.pca$x[, 1:2], cex=0.5, pch=16)
biplot(food.pca, cex=c(2, 1), xlabs=rep(".", nrow(X)))  # with variable arrows too


#---------------
# Trivialny ####

x.train <- as.matrix(scale(X, center=TRUE, scale=TRUE)) # vstupne data ako PCA

# Vytvorenie siete
model <- keras_model_sequential()
model %>%
  layer_dense(units = 2, activation = "linear", input_shape = ncol(x.train),
              name = "bottleneck") %>%
  layer_dense(units = ncol(x.train))
# input_shape: ake su vstupy
# "linear": identita, 
# dalsie napr.: "relu", "sigmoid", "tanh", "softmax"
# units = 2 ... do 2-rozmeru
#
# vyskusat zmenit na nelinearny: napr. "sigmoid"

summary(model)

# Training setup: reconstruct the input, so MSE loss with the Adam optimizer
model %>% compile(
  loss = "mean_squared_error",
  optimizer = "adam"
)

# Fit the identity mapping: input and target are both x.train
model %>% fit(
  x = x.train,
  y = x.train,
  epochs = 100,
  verbose = 0
)

# How closely the reconstructions match the inputs
MSE <- evaluate(model, x.train, x.train)
MSE

# Extract the 2-D codes: a sub-model that ends at the bottleneck layer
code.model <- keras_model(inputs = model$input, outputs = get_layer(model, "bottleneck")$output)
code.output <- predict(code.model, x.train)

head(code.output)
plot(code.output, cex=0.5, pch=16)

# Side-by-side comparison with PCA, points colored by cluster id
library(RColorBrewer)  # NOTE: rainbow() below is base R; RColorBrewer is used later
col.pal <- rainbow(10)
cols <- col.pal[food$cluster.id]

par(mfrow=c(1, 2))
plot(code.output, cex=0.5, pch=16, col=cols)      # autoencoder codes
plot(food.pca$x[, 1:2], cex=0.5, pch=16, col=cols) # PCA scores
dev.off()  # close the device to discard the 1x2 layout

#-----------------
# Netrivialny ####

# Scaled only, NOT centered this time
x.train <- as.matrix(scale(X, center=FALSE, scale=TRUE))
ncol(x.train)

# keras does not honour set.seed(); reproducibility needs:
#library(tensorflow)
#set_random_seed(1578)

# Deeper autoencoder: 4 -> 2 -> 4 sigmoid layers around the bottleneck
# (try experimenting with the hidden-layer sizes)
model2 <- keras_model_sequential()
model2 %>%
  layer_dense(units = 4, activation = "sigmoid", input_shape = ncol(x.train)) %>%
  layer_dense(units = 2, activation = "sigmoid", name = "bottleneck") %>%
  layer_dense(units = 4, activation = "sigmoid") %>%
  layer_dense(units = ncol(x.train))

summary(model2)

# Training setup: reconstruction objective, so MSE loss with Adam
model2 %>% compile(
  loss = "mean_squared_error",
  optimizer = "adam"
)

# Fit the identity mapping: input and target are both x.train
model2 %>% fit(
  x = x.train,
  y = x.train,
  epochs = 100,
  verbose = 0
)

# How closely the reconstructions match the inputs
MSE2 <- evaluate(model2, x.train, x.train)
MSE2

# 2-D codes from the bottleneck layer of the trained network
code.model2 <- keras_model(inputs = model2$input, outputs = get_layer(model2, "bottleneck")$output)
code.output2 <- predict(code.model2, x.train)

plot(code.output2, cex=0.5, pch=16)

# Comparison with PCA (colors carried over from the trivial-autoencoder section)
par(mfrow=c(1, 2))
plot(code.output2, cex=0.5, pch=16, col=cols)      # autoencoder codes
plot(food.pca$x[, 1:2], cex=0.5, pch=16, col=cols) # PCA scores
dev.off()
  # random initialization => different embeddings on different runs

# Amount of fat/energy/... per food shown in the 2-D code space: points are
# drawn in order of the chosen nutrient and colored pale (low) to red (high).
# One helper replaces six near-identical plot calls; the original first call
# omitted the 1:2 column selection its siblings used - unified here.
n <- nrow(X)
nutrient.plot <- function(col.ix, title) {
  plot(code.output2[order(X[, col.ix]), 1:2], cex=0.8, pch=19,
       col=heat.colors(n)[n:1], main=title, xlab="h1", ylab="h2")
}
nutrient.plot(1, "Tuky")            # fat
nutrient.plot(6, "Nasytene tuky")   # saturated fat
nutrient.plot(2, "Kalorie")         # energy
nutrient.plot(3, "Cukry")           # carbohydrates
nutrient.plot(4, "Bielkoviny")      # protein
nutrient.plot(5, "Cholesterol")     # cholesterol

#===========
# MNIST ####

# Reload MNIST: raw labels plus pixel values rescaled to [0, 1];
# as.matrix drops the data.frame class, which otherwise breaks training
M <- read.table("MNIST.txt")
lab <- M[, 1]
X.all <- as.matrix(M[, -1] / 255)
n <- nrow(X.all)

# Autoencode the full data set (no train/test split here)
x.train <- X.all
ncol(x.train)

# keras does not honour set.seed(); reproducibility needs:
#library(tensorflow)
#set_random_seed(1578)

# 784 -> 28 -> 2 -> 28 -> 784 autoencoder with sigmoid hidden layers
# (try experimenting with the hidden-layer sizes)
model3 <- keras_model_sequential()
model3 %>%
  layer_dense(units = 28, activation = "sigmoid", input_shape = ncol(x.train)) %>%
  layer_dense(units = 2, activation = "sigmoid", name = "bottleneck") %>%
  layer_dense(units = 28, activation = "sigmoid") %>%
  layer_dense(units = ncol(x.train))

summary(model3)

# Training setup: reconstruction objective, so MSE loss with Adam
model3 %>% compile(
  loss = "mean_squared_error",
  optimizer = "adam"
)

# Fit the identity mapping (input = target); verbose progress output on
model3 %>% fit(
  x = x.train,
  y = x.train,
  epochs = 10,
  verbose = 1
)

# Reconstruction error on the training data
MSE3 <- evaluate(model3, x.train, x.train)
MSE3

# 2-D codes from the bottleneck layer of the trained network
code.model3 <- keras_model(inputs = model3$input, outputs = get_layer(model3, "bottleneck")$output)
code.output3 <- predict(code.model3, x.train)

plot(code.output3, cex=0.5, pch=16)

# Color each point by its digit: 9 Set1 colors plus black for the tenth class
library(RColorBrewer) # brewer.pal
col.pal <- c(brewer.pal(9, "Set1"), "black")
cols <- col.pal[lab + 1]  # labels 0..9 index the palette at 1..10

# Widen the right margin so the legend can sit outside the plot region
par(mar=(c(5, 4, 4, 4) + 0.1))
plot(code.output3, pch=16, cex=0.5, col=cols)
lims <- par("usr")  # plot-region limits in user coordinates
legend(lims[2], lims[4], legend=0:9, pch=16, col=col.pal, xpd=NA)  # xpd=NA: draw into the margin

