Caret package Custom metric

MarcelRitos · Jun 6, 2016 · Viewed 10.4k times

I'm using the caret function train() in one of my projects and I'd like to add a custom metric, the F1-score. I looked at this URL: caret package, but I cannot understand how to build this score with the parameters available.

The documentation gives the following example of a custom metric:

library(caret)
library(mlbench)  # provides the BostonHousing data
data(BostonHousing)

## Example with a custom metric
madSummary <- function(data,
                       lev = NULL,
                       model = NULL) {
  out <- mad(data$obs - data$pred,
             na.rm = TRUE)
  names(out) <- "MAD"
  out
}
robustControl <- trainControl(summaryFunction = madSummary)
marsGrid <- expand.grid(degree = 1, nprune = (1:10) * 2)
earthFit <- train(medv ~ .,
                  data = BostonHousing,
                  method = "earth",
                  tuneGrid = marsGrid,
                  metric = "MAD",
                  maximize = FALSE,
                  trControl = robustControl)
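For reference, the data argument that caret passes to summaryFunction is a data frame with (at least) obs and pred columns, so a metric function can be sanity-checked by hand. A minimal sketch on a made-up data frame:

## Mock of the data frame caret hands to summaryFunction;
## the column names "obs" and "pred" are fixed by caret
mock <- data.frame(obs  = c(10, 20, 30),
                   pred = c(12, 18, 33))
madSummary(mock)  # returns a named numeric vector: MAD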

Update:

I tried your code, but the problem is that it doesn't work with multiple classes, as in the code below (the F1 score is displayed, but it looks wrong). I'm not sure, but I think the F1_Score function only works on binary classes.

library(caret)
library(MLmetrics)

set.seed(346)
dat <- iris

## See http://topepo.github.io/caret/training.html#metrics
f1 <- function(data, lev = NULL, model = NULL) {
  # inspect the resampled predictions caret passes in (debugging)
  print(data)
  f1_val <- F1_Score(y_pred = data$pred, y_true = data$obs)
  c(F1 = f1_val)
}

# Split the data: 70% for training
in_train <- createDataPartition(dat$Species, p = .70, list = FALSE)

trainClass <- dat[in_train, ]
testClass  <- dat[-in_train, ]

set.seed(35)
mod <- train(Species ~ ., data = trainClass,
             method = "rpart",
             metric = "F1",
             trControl = trainControl(summaryFunction = f1,
                                      classProbs = TRUE))

print(mod)
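One workaround for the multiclass case is to macro-average the per-class F1 values, scoring one class at a time via the positive argument of MLmetrics::F1_Score. A minimal sketch (macroF1 is an illustrative name, not a caret or MLmetrics function):

## Macro-averaged F1: treat each level in turn as the positive
## class, compute its F1, then average across classes
macroF1 <- function(data, lev = NULL, model = NULL) {
  per_class <- sapply(lev, function(cl) {
    F1_Score(y_pred = data$pred, y_true = data$obs, positive = cl)
  })
  c(F1 = mean(per_class, na.rm = TRUE))
}

This drops into trainControl(summaryFunction = macroF1) in place of f1 above.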

I also coded a manual F1 score that takes a confusion matrix as its input. (I'm not sure whether a confusion matrix is available inside "summaryFunction".)

F1_score <- function(mat, algoName){

  ##
  ## Compute the F1-score from a confusion matrix
  ##

  # Remark: rows = predictions // columns = real values
  n_class   <- nrow(mat)
  recall    <- numeric(n_class)
  precision <- numeric(n_class)
  F1_score  <- matrix(0, nrow = 1, ncol = n_class)

  for(i in 1:n_class){
    recall[i]    <- mat[i, i] / rowSums(mat)[i]
    precision[i] <- mat[i, i] / colSums(mat)[i]
  }

  for(i in 1:n_class){
    F1_score[i] <- 2 * (precision[i] * recall[i]) / (precision[i] + recall[i])
  }

  # Label the result with the class names
  colnames(F1_score) <- colnames(mat)
  rownames(F1_score) <- algoName

  # Print the F1_score for each class
  print(F1_score)

  # Return the average F1_score
  mean(F1_score[1, ])
}
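For instance, assuming the mod and testClass objects from the code above, it can be called on a confusion matrix built with table() (predictions in rows, real values in columns, matching the remark in the code):

pred <- predict(mod, newdata = testClass)
cm   <- table(pred, testClass$Species)
F1_score(cm, "rpart")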

Answer

topepo · Jun 11, 2016

You should look at The caret Package - Alternate Performance Metrics for details. A working example:

library(caret)
library(MLmetrics)

set.seed(346)
dat <- twoClassSim(200)

## See https://topepo.github.io/caret/model-training-and-tuning.html#metrics
f1 <- function(data, lev = NULL, model = NULL) {
  f1_val <- F1_Score(y_pred = data$pred, y_true = data$obs, positive = lev[1])
  c(F1 = f1_val)
}

set.seed(35)
mod <- train(Class ~ ., data = dat,
             method = "rpart",
             tuneLength = 5,
             metric = "F1",
             trControl = trainControl(summaryFunction = f1, 
                                      classProbs = TRUE))
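After fitting, the resampled F1 used for tuning shows up as a column in the results table, so a quick check is:

## F1 is the column created by the custom summary function;
## cp is rpart's tuning parameter
mod$results[, c("cp", "F1")]

Note that positive = lev[1] treats the first factor level as the positive class; lev is the vector of outcome levels that caret passes to the summary function.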