[Memo] Building a machine learning environment on Ubuntu 18.04 with nvidia docker (latest as of July 2020) - Part 3: Machine Learning Libraries

This is Part 3 of the series. It assumes you have already completed Part 1 (docker) and Part 2 (sklearn).

In this final installment we install tensorflow, keras, and pytorch.
That completes the original goal of this series.

I haven't decided whether I want to study keras or pytorch, so I'm installing both, but installing just one of them is perfectly fine.

tensorflow

First, install the package that tensorflow needs.
Add the following to the Dockerfile.

RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \
   python3-setuptools

Next, install tensorflow itself. Add the following to the Dockerfile.

RUN pip3 install -q tensorflow-gpu
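
Before moving on, it is worth checking that tensorflow can actually see the GPU from inside the container. Here is a minimal sketch of the check I do; it assumes a TensorFlow 2.x install (which is what pip gives you as of mid-2020) and that the container was started with GPU access as set up in Part 1.

import tensorflow as tf

print(tf.__version__)
# Should list at least one GPU device if the container can see the GPU
print(tf.config.list_physical_devices('GPU'))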

Here is a test script to confirm that tensorflow works.
It is based on the official tutorial.

#!/usr/bin/env python
# coding: utf-8

from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf

# Load MNIST and scale pixel values to the [0, 1] range
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# A simple fully connected classifier for 28x28 images
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train for 5 epochs, then evaluate on the test set
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)

keras

Now let's install keras. You may not need it if you are happy working directly in tensorflow, but I find it easier to work through keras.
Add the following to the Dockerfile.

RUN pip3 install -q keras
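
As a quick sanity check (just a minimal sketch, not an official test), you can confirm that the standalone keras package imports and is wired to the tensorflow backend.

import keras
from keras import backend as K

print(keras.__version__)
# Expected output: 'tensorflow'
print(K.backend())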

Here is a test script to confirm that keras works.
It is based on the official example.

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
   x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
   x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
   input_shape = (1, img_rows, img_cols)
else:
   x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
   x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
   input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                activation='relu',
                input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
             optimizer=keras.optimizers.Adadelta(),
             metrics=['accuracy'])

model.fit(x_train, y_train,
         batch_size=batch_size,
         epochs=epochs,
         verbose=1,
         validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

pytorch

Finally, let's install pytorch. These days pytorch is where the momentum is.
Add the following to the Dockerfile.

RUN pip3 install -q torch torchvision
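
Before running the full test script, here is a minimal sketch of a check that the installed torch build can use CUDA (again assuming the container was started with GPU access).

import torch

print(torch.__version__)
# True if this is a CUDA build and the GPU is visible to the container
print(torch.cuda.is_available())
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))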

Here is a test script for pytorch.
It is also based on the official example.

from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
   def __init__(self):
       super(Net, self).__init__()
       self.conv1 = nn.Conv2d(1, 32, 3, 1)
       self.conv2 = nn.Conv2d(32, 64, 3, 1)
       self.dropout1 = nn.Dropout2d(0.25)
       self.dropout2 = nn.Dropout2d(0.5)
       self.fc1 = nn.Linear(9216, 128)
       self.fc2 = nn.Linear(128, 10)

   def forward(self, x):
       x = self.conv1(x)
       x = F.relu(x)
       x = self.conv2(x)
       x = F.relu(x)
       x = F.max_pool2d(x, 2)
       x = self.dropout1(x)
       x = torch.flatten(x, 1)
       x = self.fc1(x)
       x = F.relu(x)
       x = self.dropout2(x)
       x = self.fc2(x)
       output = F.log_softmax(x, dim=1)
       return output


def train(args, model, device, train_loader, optimizer, epoch):
   model.train()
   for batch_idx, (data, target) in enumerate(train_loader):
       data, target = data.to(device), target.to(device)
       optimizer.zero_grad()
       output = model(data)
       loss = F.nll_loss(output, target)
       loss.backward()
       optimizer.step()
       if batch_idx % args.log_interval == 0:
           print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
               epoch, batch_idx * len(data), len(train_loader.dataset),
               100. * batch_idx / len(train_loader), loss.item()))
           if args.dry_run:
               break


def test(model, device, test_loader):
   model.eval()
   test_loss = 0
   correct = 0
   with torch.no_grad():
       for data, target in test_loader:
           data, target = data.to(device), target.to(device)
           output = model(data)
           test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
           pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
           correct += pred.eq(target.view_as(pred)).sum().item()

   test_loss /= len(test_loader.dataset)

   print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
       test_loss, correct, len(test_loader.dataset),
       100. * correct / len(test_loader.dataset)))


def main():
   # Training settings
   parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
   parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                       help='input batch size for training (default: 64)')
   parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                       help='input batch size for testing (default: 1000)')
   parser.add_argument('--epochs', type=int, default=14, metavar='N',
                       help='number of epochs to train (default: 14)')
   parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                       help='learning rate (default: 1.0)')
   parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                       help='Learning rate step gamma (default: 0.7)')
   parser.add_argument('--no-cuda', action='store_true', default=False,
                       help='disables CUDA training')
   parser.add_argument('--dry-run', action='store_true', default=False,
                       help='quickly check a single pass')
   parser.add_argument('--seed', type=int, default=1, metavar='S',
                       help='random seed (default: 1)')
   parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                       help='how many batches to wait before logging training status')
   parser.add_argument('--save-model', action='store_true', default=False,
                       help='For Saving the current Model')
   args = parser.parse_args()
   use_cuda = not args.no_cuda and torch.cuda.is_available()

   torch.manual_seed(args.seed)

   device = torch.device("cuda" if use_cuda else "cpu")

   kwargs = {'batch_size': args.batch_size}
   if use_cuda:
       kwargs.update({'num_workers': 1,
                      'pin_memory': True,
                      'shuffle': True},
                    )

   transform=transforms.Compose([
       transforms.ToTensor(),
       transforms.Normalize((0.1307,), (0.3081,))
       ])
   dataset1 = datasets.MNIST('../data', train=True, download=True,
                      transform=transform)
   dataset2 = datasets.MNIST('../data', train=False,
                      transform=transform)
   train_loader = torch.utils.data.DataLoader(dataset1,**kwargs)
   test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)

   model = Net().to(device)
   optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

   scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
   for epoch in range(1, args.epochs + 1):
       train(args, model, device, train_loader, optimizer, epoch)
       test(model, device, test_loader)
       scheduler.step()

   if args.save_model:
       torch.save(model.state_dict(), "mnist_cnn.pt")


if __name__ == '__main__':
   main()
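
If you run the script with --save-model, the trained weights are written to mnist_cnn.pt. As a small usage note, here is a minimal sketch of reloading them later for inference (the module name test_pytorch is just a placeholder for whatever you named the script above).

import torch
from test_pytorch import Net  # hypothetical filename for the test script above

model = Net()
model.load_state_dict(torch.load("mnist_cnn.pt", map_location="cpu"))
model.eval()  # switch to inference mode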

The files up to this point are available

I have published the Dockerfile and the test scripts.
Feel free to download and use them.

Summary

Over these three parts we have set up the machine learning libraries in a docker environment.
Everything above has been confirmed to work with the library versions current at the time of writing, so feel free to use it.

I plan to use this docker setup for my own study from here on.

Author of this article: @wina_S_1991
