#===================
# Artificial example ####

library(mvtnorm)

# predictors
set.seed(12345)
L <- 30    # training-set size
n <- 2*L   # total sample size (train + test)
k <- 5     # number of predictors
Sigma <- matrix(0.95, nrow=k, ncol=k)  # strongly correlated regressors
diag(Sigma) <- 1
X <- rmvnorm(n, sigma=Sigma)

# y = b1 + b2*x1 + eps
eps <- rnorm(n)
b <- c(1, 2)
y <- b[1] + b[2] * X[, 1] + eps

# training and test sets
ytrain <- y[1:L]
Xtrain <- X[1:L, ]
ytest <- y[(L+1):n]
Xtest <- X[(L+1):n, ]

# Regression: y ~ x1
plot(Xtrain[, 1], ytrain, lwd=2)

m1 <- lm(ytrain ~ Xtrain[, 1])
b1 <- m1$coef
b  # true values
b1 # estimates
abline(b1, lwd=2, col="red")

# The second x
plot(X, lwd=2, main = "Regressors") # x1 vs x2

# Regression: y ~ x1 + x2
m2 <- lm(ytrain ~ Xtrain[, 1] + Xtrain[, 2])
b2 <- m2$coef

b  # true values
b1 # estimates in M1
b2 # estimates in M2

# y ~ all the x's
m3 <- lm(ytrain ~ Xtrain)
b3 <- m3$coef

b  # true values
b1 # estimates in M1
b2 # estimates in M2
b3 # estimates in M3

# sum of coefficients excluding the intercept
sum(b1[-1])
sum(b2[-1])
sum(b3[-1])

# sum of squares (the l2 norm that ridge penalizes)
sum(b1[-1]^2)
sum(b2[-1]^2)
sum(b3[-1]^2)

# sum of absolute values (the l1 norm that lasso penalizes)
sum(abs(b1[-1]))
sum(abs(b2[-1]))
sum(abs(b3[-1]))

# Prediction quality
yhat1 <- cbind(1, Xtest[, 1]) %*% b1
yhat2 <- cbind(1, Xtest[, 1], Xtest[, 2]) %*% b2
yhat3 <- cbind(1, Xtest) %*% b3
plot(ytest, yhat1, col="red", lwd=2, main="Prediction vs truth",
     ylim = c(min(yhat1, yhat2, yhat3), max(yhat1, yhat2, yhat3)))
abline(c(0,1))
legend("bottomright", legend=c("M1", "M2", "M3"), lwd=2, 
       col=c("red", "blue", "green"), pch=1, lty=0)
points(ytest, yhat2, col="blue", lwd=2)
points(ytest, yhat3, col="green", lwd=2)

# Test MSE
MSE1 <- mean((yhat1 - ytest)^2)
MSE2 <- mean((yhat2 - ytest)^2)
MSE3 <- mean((yhat3 - ytest)^2)

MSE1
MSE2
MSE3

# Ridge regression
library(MASS)  # lm.ridge
m.ridge <- lm.ridge(ytrain ~ Xtrain, lambda = 2)
bridge <- m.ridge$coef
   # note: $coef holds coefficients for internally standardized predictors
   # (no intercept); coef(m.ridge) returns them on the original scale

# comparison with the others
b3
bridge

# sums excluding the intercept
sum(b1[-1])
sum(b3[-1])
sum(bridge)

# sums of squares
sum(b1[-1]^2)
sum(b3[-1]^2)
sum(bridge^2)
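
# Note that bridge lives on lm.ridge's internal standardized scale; for a
# comparison on the original scale of X, coef() rescales the estimates and
# adds the intercept back (a quick check using MASS itself):
coef(m.ridge)  # intercept + coefficients on the original scale
b3             # OLS estimates on the same scale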

#========================
# Ridge: real data ####

# Hitters data
library(ISLR2)
?Hitters
data(Hitters)
View(Hitters)

data <- na.omit(Hitters) # drop rows with NA
n <- nrow(data)

library(MASS)  # lm.ridge

# Split into Train and Val
set.seed(278643)
n.val <- round(n/10)
n.train <- n - n.val
ind.train <- sample.int(n, n.train, replace=FALSE)
ind.val <- (1:n)[-ind.train]
train <- data[ind.train, ]
val <- data[ind.val, ]

X.all <- model.matrix(Salary~., data=data)
X.train0 <- X.all[ind.train, ]
y.train0 <- train$Salary
X.val0 <- X.all[ind.val, ]
y.val0 <- val$Salary

# Centering and scaling
X0 <- X.train0[, -1]   # without the intercept column
   # caution: if you used Salary ~ . - 1 directly, R would add
   # one extra dummy variable, which is undesirable
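   # A quick check of the caution above (illustrative only): without the
   # intercept in the formula, R dummy-codes all levels of the first factor,
   # so the design matrix keeps the same width instead of losing a column.
ncol(X.all[, -1])                              # intercept dropped: one column fewer
ncol(model.matrix(Salary ~ . - 1, data=data))  # an extra dummy appears instead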
mena <- colnames(X0)
s <- sqrt(diag(var(X0)))  # standard deviations
Xm <- apply(X0,2,mean)    # means
X <- ( X0 - tcrossprod(rep(1,n.train),Xm) ) %*% diag(1/s)
ym <- mean(y.train0)
y <- y.train0 - ym
colnames(X) <- mena

# the same transformations for val
# ... subtract the X.train means from X and divide by the X.train sds
# ... subtract the y.train mean from y
X.val1 <- X.val0[, -1]
X.val <- ( X.val1 - tcrossprod(rep(1, n.val), Xm) ) %*% diag(1/s)
y.val <- y.val0 - ym

# Full model
hit.full <- lm(y ~ X - 1)
summary(hit.full)
b.full <- hit.full$coef

# RMSE on train and val
RMSE.train <- sqrt(sum((hit.full$residuals)^2) / n.train)
pred <- X.val %*% b.full
RMSE.val <- sqrt(sum((y.val - pred)^2) / n.val)  # divide by n.val, just like the train RMSE

summary(y)
RMSE.train
RMSE.val # overfitting?

# Ridge for a range of lambdas
lam <- seq(0, 100, 0.5)
hit.ridge <- lm.ridge(y ~ X - 1, lambda = lam)
b.ridge <- hit.ridge$coef
View(b.ridge)

# Coefficient paths of the individual variables
par(mfrow=c(3,4))
for(i in 1:nrow(b.ridge)) {
   plot(lam, b.ridge[i, ], type="l", lwd=2, main=mena[i])
}
# produces two consecutive figures
# (with a reasonably large monitor, set mfrow=c(4,5) to fit everything at once)

# Validation, choice of lambda
L <- length(lam)
y.pred <- tcrossprod(y.val, rep(1,L))    # matrix (y.val, y.val, ..., y.val)
pred <- X.val %*% b.ridge                # matrix of predictions (one column per lambda)
                                         # lm.ridge() has no predict() method
MSE.val <- colSums((y.pred - pred)^2)/n.val  # vector of MSEs
graphics.off()
plot(lam, MSE.val, type="l", lwd=3, main="Validation error")

# Best model
lopt <- which.min(MSE.val)                          # index of the best lambda
lam[lopt]
hit.opt <- lm.ridge(y ~ X - 1, lambda = lam[lopt])  # the corresponding model
lines(c(lam[lopt],lam[lopt]), c(0,MSE.val[lopt]), lwd=3, col="red")

# Comparison with the full model
bety <- cbind(b.full, hit.opt$coef)
colnames(bety) <- c("Full", "Ridge")
bety

RMSE.val # RMSE_val of the full model
sqrt(min(MSE.val)) # RMSE_val of the validation-selected model
  # on Val the latter understandably has the smaller error

# Try cross-validation manually as well; a sketch follows
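
# A minimal manual K-fold cross-validation sketch over the same lambda grid
# (illustrative: K, the seed and the object names fold, CV, MSE.cv are
# chosen here; predictions reuse the script's X %*% $coef approach):
K <- 10
set.seed(42)
fold <- sample(rep(1:K, length.out=n.train))   # random fold labels
CV <- matrix(0, nrow=K, ncol=length(lam))
for(f in 1:K) {
   fit <- lm.ridge(y[fold!=f] ~ X[fold!=f, ] - 1, lambda=lam)
   res <- tcrossprod(y[fold==f], rep(1,length(lam))) - X[fold==f, ] %*% fit$coef
   CV[f, ] <- colSums(res^2)/sum(fold==f)      # per-fold MSE for each lambda
}
MSE.cv <- colMeans(CV)
plot(lam, MSE.cv, type="l", lwd=3, main="Cross-validation error")
lam[which.min(MSE.cv)]                         # CV-optimal lambda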

# We can also do this with the functions glmnet and cv.glmnet, see the lasso section below

# At the start we could also have cut off another 10 percent of the data as a test set
# and then compared the quality of the selected models on it (see the sketch below)
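
# A minimal sketch of that three-way split (illustrative: the seed and the
# names n.test, ind.test, ind.rest are chosen here):
set.seed(1)
n.test <- round(n/10)
ind.test <- sample.int(n, n.test)   # held out once, touched only at the very end
ind.rest <- (1:n)[-ind.test]        # split these further into Train and Val as above
# ... tune on the Train/Val part, then compare the final models on ind.test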

#======
# Lasso

library(glmnet)

# on all the data
X <- X.all[, -1] # without the intercept column
y <- data$Salary

mLasso.full <- glmnet(X, y, family="gaussian", alpha=1)
# gaussian: linear regression
# alpha=1: lasso, alpha=0: ridge
# default: intercept=TRUE ... adds an intercept
# default: standardize=TRUE ... standardizes the predictors
#          (but reports the betas on the original scales)
b.lasso <- mLasso.full$beta   # estimated coefficients for each lambda
pocty.full <- colSums(abs(b.lasso)>1e-9)  # numbers of nonzero coefficients
lams <- mLasso.full$lambda    # glmnet picks the lambdas itself

# Coefficient paths of the individual variables
par(mfrow=c(3,4))
for(i in 1:nrow(b.lasso)) {
   plot(lams, b.lasso[i, ], type="l", lwd=2, main=row.names(b.lasso)[i])
}
# again two consecutive figures

# numbers of nonzero coefficients
dev.off()
plot(lams, pocty.full, type="l", lwd=2, main="Nonzero coefficients")

# to run ridge regression instead, it suffices to set alpha=0; for example:
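
# A one-line illustration of the remark above (the name mRidge.full is
# chosen here for the example):
mRidge.full <- glmnet(X, y, family="gaussian", alpha=0)  # ridge penalty instead of lasso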

#--------------
# Cross-validation

set.seed(159753)
mLasso <- cv.glmnet(X, y, family="gaussian", alpha=1, nfolds=10)
# see ?glmnet, ?cv.glmnet
lams <- mLasso$lambda       # glmnet picks the lambdas itself
chyby <- mLasso$cvm         # the corresponding cross-validation errors
lmin <- mLasso$lambda.min   # the best lambda
pocty <- mLasso$nzero       # numbers of nonzero coefficients

# MSE curve
plot(lams, chyby, type="l", lwd=2, main="Cross-validation error")
lines(c(lmin,lmin), c(0,chyby[lams==lmin]), lwd=2, col="red")

# numbers of nonzero beta_lasso coefficients
plot(lams, pocty, type="l", lwd=2, main="Nonzero coefficients")
points(lmin, pocty[lams==lmin], pch=16, cex=1.5, col="red")

# model with the best lambda
pocty[lams==lmin] # number of nonzero parameters
mLasso.min <- glmnet(X, y, family="gaussian", alpha=1, lambda=lmin)
mLasso.min$beta
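
# Equivalently, the coefficients at the optimal lambda can be read off the
# cross-validated object directly with the standard glmnet accessor:
coef(mLasso, s="lambda.min")  # includes the intercept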

# We can also choose the lambdas ourselves
lams2 <- seq(0,10,0.2)
mLasso2 <- cv.glmnet(X, y, family="gaussian", alpha=1, lambda=lams2, nfolds=50)
lams2 <- mLasso2$lambda       # cv.glmnet reorders the lambdas its own way (largest first)
lmin2 <- mLasso2$lambda.min   # the best lambda
pocty2 <- mLasso2$nzero
chyby2 <- mLasso2$cvm

# MSE curve
plot(lams2, chyby2, type="l", lwd=2, main="Cross-validation error")
lines(c(lmin2,lmin2), c(0,chyby2[lams2==lmin2]), lwd=2, col="red")

# numbers of nonzero beta_lasso coefficients
plot(lams2, pocty2, type="l", lwd=2, main="Nonzero coefficients")
points(lmin2, pocty2[lams2==lmin2], pch=16, cex=1.5, col="red")

# Try comparing the Lasso models on a test sample as well; a sketch follows
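
# A minimal sketch of such a comparison (illustrative: the seed, the split
# and the object names below are chosen here):
set.seed(2468)
ind.tst <- sample.int(nrow(X), round(nrow(X)/10))   # 10 percent test set
fit.cv <- cv.glmnet(X[-ind.tst, ], y[-ind.tst], family="gaussian", alpha=1)
pred.min <- predict(fit.cv, newx=X[ind.tst, ], s="lambda.min")
pred.1se <- predict(fit.cv, newx=X[ind.tst, ], s="lambda.1se")
sqrt(mean((y[ind.tst] - pred.min)^2))  # test RMSE at lambda.min
sqrt(mean((y[ind.tst] - pred.1se)^2))  # test RMSE at lambda.1se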

