pytorch: "multi-target not supported" error message

Theodor Peifer · Aug 2, 2019 · Viewed 13.2k times

I want to classify some (3, 50, 50) pictures. First I loaded the dataset from the file without a DataLoader or batches, and it worked. Now, after adding both, I get this error:

RuntimeError: multi-target not supported at /pytorch/aten/src/THCUNN/generic/ClassNLLCriterion.cu:15

I found a lot of answers on the internet, mostly suggesting "target.squeeze(1)", but it doesn't work for me. My target batch looks like the following:

tensor([[1, 0],
        [1, 0],
        [1, 0],
        [1, 0],
        [1, 0],
        [1, 0],
        [1, 0],
        [1, 0]], device='cuda:0')

Shouldn't that be okay?

Here is the full code (note that I'm only creating the structure of the model, which I'm going to apply to the full and correct dataset afterwards, because I don't have the full data yet, only 32 pictures and no labels; that's why I added "torch.tensor([1, 0])" as a placeholder for all labels):

import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch.autograd import Variable

import numpy as np
from PIL import Image


class Model(nn.Module):

    def __init__(self):
        super(Model, self).__init__()

        # model structure:
        self.conv1 = nn.Conv2d(3, 10, kernel_size=(5,5),  stride=(1,1))
        self.conv2 = nn.Conv2d(10, 20, kernel_size=(5,5),  stride=(1,1))            # with maxpool: output = 20 * (9,9) feature-maps -> flatten
        self.fc1 = nn.Linear(20*9*9, 250)
        self.fc2 = nn.Linear(250, 100)
        self.fc3 = nn.Linear(100, 2)

    def forward(self, x):

        # conv layers
        x = F.relu(self.conv1(x))   # shape: 1, 10, 46, 46
        x = F.max_pool2d(x, 2, 2)   # shape: 1, 10, 23, 23
        x = F.relu(self.conv2(x))   # shape: 1, 20, 19, 19
        x = F.max_pool2d(x, 2, 2)   # shape: 1, 20, 9, 9

        # flatten to dense layer:
        x = x.view(-1, 20*9*9)

        # dense layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        output = F.log_softmax(self.fc3(x), dim=1)
        return output


class Run:

    def __init__(self, epochs, learning_rate, dropout, momentum):

        # load model
        self.model = Model().cuda()

        # hyperparameters:
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.dropout = dropout

    def preprocessing(self):

        dataset_folder = "/media/theodor/hdd/Programming/BWKI/dataset/bilder/"

        dataset = []

        for i in range(0, 35):

            sample_image = Image.open(dataset_folder + str(i) + ".png")
            data = torch.from_numpy(np.array(sample_image)).type("torch.Tensor").reshape(3, 50, 50)
            target = torch.tensor([[1, 0]])

            sample = (data, target)

            dataset.append(sample)


        train_loader = torch.utils.data.DataLoader(dataset, batch_size=8)

        return train_loader

    def train(self):

        train_set = self.preprocessing()

        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

        for epoch in range(self.epochs):

            epoch_loss = 0
            for i, data in enumerate(train_set, 0):

                sample, target = data
                # set data as cuda variable
                sample = Variable(sample.float().cuda())
                target = Variable(target.cuda())
                # initialize optimizer
                optimizer.zero_grad()
                # predict
                output = self.model(sample)
                # backpropagation
                print(output, target.squeeze(1))
                loss = criterion(output, target.squeeze(1))    # ERROR MESSAGE: RuntimeError: multi-target not supported at /pytorch/aten/src/THCUNN/generic/ClassNLLCriterion.cu:15
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()

            print("loss after epoch [", epoch, "|", self.epochs, "] :", epoch_loss)

    def test(self):
        pass


run = Run(10, 0.001, 0.5, 0.9)
run.train()

So I expected it to start training (of course not learning anything useful, because the labels are just placeholders). Thanks in advance!

Answer

McLawrence · Aug 2, 2019

For nn.CrossEntropyLoss the target has to be a single class index in the range [0, #classes - 1] instead of a one-hot encoded target vector. Your target is [1, 0], so PyTorch thinks you want multiple labels per input, which is not supported.

Replace your one-hot-encoded targets:

[1, 0] --> 0

[0, 1] --> 1
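
As a minimal sketch (the variable names and the random scores below are only illustrative, not taken from your code), the placeholder label can be created directly as a class index, or an existing one-hot batch can be converted with argmax:

import torch
import torch.nn as nn

# placeholder label as a plain class index instead of a one-hot vector;
# the DataLoader stacks these into a target batch of shape (batch_size,)
target = torch.tensor(0)

# converting an existing one-hot batch, e.g. the (8, 2) tensor from the question
one_hot = torch.tensor([[1, 0]] * 8)
class_indices = one_hot.argmax(dim=1)   # shape (8,), values in {0, 1}

# nn.CrossEntropyLoss expects scores of shape (N, C) and class-index targets of shape (N,)
criterion = nn.CrossEntropyLoss()
scores = torch.randn(8, 2)
loss = criterion(scores, class_indices)

With that change the target batch has shape (batch_size,) and the loss computes without the "multi-target not supported" error.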