Lädt...


🔧 Save model in PyTorch


Nachrichtenbereich: 🔧 Programmierung
🔗 Quelle: dev.to

Buy Me a Coffee

*Memos:

  • My post explains Linear Regression in PyTorch.
  • My post explains Batch, Mini-Batch and Stochastic Gradient Descent with DataLoader() in PyTorch.
  • My post explains Batch Gradient Descent without DataLoader() in PyTorch.

You can save a model's learned parameters with torch.save() after training and testing it, as shown below:

*Memos:

import torch
from torch import nn
from torch import optim
import matplotlib.pyplot as plt

# Set device: prefer GPU when available so data and model live together.
device = "cuda" if torch.cuda.is_available() else "cpu"

""" Prepare dataset """
# Ground-truth parameters of the line the model should recover.
weight = 0.8
bias = 0.5

# 50 evenly spaced inputs 0.00, 0.02, ..., 0.98, shaped (50, 1).
# Generated programmatically instead of a hand-typed 50-row literal so the
# sample count and step can be changed in one place without transcription errors.
X = (torch.arange(50, device=device) * 0.02).unsqueeze(dim=1)
# Noiseless targets: y = 0.8 * x + 0.5.
Y = weight * X + bias

# 80/20 train/test split: first 40 rows train, last 10 test.
l = int(0.8 * len(X))
X_train, Y_train, X_test, Y_test = X[:l], Y[:l], X[l:], Y[l:]
""" Prepare dataset """

""" Prepare model, loss function and optimizer """
class LinearRegressionModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear_layer = nn.Linear(in_features=1, out_features=1)

    def forward(self, x):
        return self.linear_layer(x)

# Seed before instantiation so the initial weights are reproducible.
torch.manual_seed(42)

# Model lives on the same device as the data.
my_model = LinearRegressionModel().to(device)

# Mean absolute error (L1) as the training criterion.
loss_fn = nn.L1Loss()

# Plain stochastic gradient descent over all trainable parameters.
optimizer = optim.SGD(my_model.parameters(), lr=0.01)
""" Prepare model, loss function and optimizer """

""" Train and test model """
epochs = 50

epoch_count = []
loss_values = []
test_loss_values = []

for epoch in range(epochs):

    """ Train """
    my_model.train()

    # 1. Calculate predictions(Forward propagation)
    Y_pred = my_model(X_train)

    # 2. Calculate loss
    loss = loss_fn(Y_pred, Y_train)

    # 3. Zero out gradients
    optimizer.zero_grad()

    # 4. Calculate a gradient(Backpropagation)
    loss.backward()

    # 5. Update parameters
    optimizer.step()
    """ Train """

    """ Test """
    my_model.eval()

    with torch.inference_mode():
        Y_test_pred = my_model(x=X_test)
        test_loss = loss_fn(Y_test_pred, Y_test)
    if epoch % 10 == 0:
        epoch_count.append(epoch)
        loss_values.append(loss)
        test_loss_values.append(test_loss)
        # print(f"Epoch: {epoch} | Loss: {loss} | Test loss: {test_loss}")
        # ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ Uncomment it to see the details ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑
    """ Test """
""" Train and test model """

""" Visualize train and test data and predictions"""
with torch.inference_mode():
    Y_pred = my_model(x=X_test)

def plot_predictions(X_train, Y_train, X_test, Y_test, predictions=None):
    plt.figure(figsize=[6, 4])
    plt.scatter(X_train, Y_train, c='g', s=5, label='Train data(Green)')
    plt.scatter(X_test, Y_test, c='b', s=15, label='Test data(Blue)')
    if predictions is not None:
        plt.scatter(X_test, predictions, c='r', s=15, label='Predictions(Red)')
    plt.title(label="Train and test data and predictions", fontsize=14)
    plt.legend(fontsize=14)

plot_predictions(X_train=X_train.cpu(),
                 Y_train=Y_train.cpu(),
                 X_test=X_test.cpu(),
                 Y_test=Y_test.cpu(),
                 predictions=Y_pred.cpu())
""" Visualize train and test data, predictions"""

""" Visualize train and test loss """
def plot_loss_curves(epoch_count, loss_values, test_loss_values):
    plt.figure(figsize=[6, 4])
    plt.plot(epoch_count, loss_values, label="Train loss")
    plt.plot(epoch_count, test_loss_values, label="Test loss")
    plt.title(label="Train and test loss curves", fontsize=14)
    plt.ylabel(ylabel="Loss", fontsize=14)
    plt.xlabel(xlabel="Epochs", fontsize=14)
    plt.legend(fontsize=14)

plot_loss_curves(epoch_count=epoch_count,
                 loss_values=torch.tensor(loss_values).cpu(),
                 test_loss_values=torch.tensor(test_loss_values).cpu())
""" Visualize train and test loss """

""" Save model """
from pathlib import Path

MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)

MODEL_NAME = "linear_regression_0.pth"
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME

torch.save(obj=my_model.state_dict(), f=MODEL_SAVE_PATH)
""" Save model """

Image description

Image description

Now, a model is saved as shown below:

Colab:

Image description

JupyterLab:

Image description

...

🔧 Save model in PyTorch


📈 26.31 Punkte
🔧 Programmierung

📰 Pytorch: Microsoft startet neuen Azure-Service Pytorch Enterprise


📈 25.1 Punkte
📰 IT Nachrichten

📰 PyTorch Lightning 1.0: PyTorch, nur schneller und flexibler


📈 25.1 Punkte
📰 IT Nachrichten

🔧 PyTorch on Azure: Full support for PyTorch 1.2


📈 25.1 Punkte
🔧 Programmierung

🔧 Learn You a PyTorch! (aka Introduction Into PyTorch)


📈 25.1 Punkte
🔧 Programmierung

📰 Using SHAP to Debug a PyTorch Image Regression Model


📈 18.79 Punkte
🔧 AI Nachrichten

📰 Create Amazon SageMaker models using the PyTorch Model Zoo


📈 18.79 Punkte
🔧 AI Nachrichten

📰 Training a Linear Regression Model in PyTorch


📈 18.79 Punkte
🔧 AI Nachrichten

🔧 Load model in PyTorch


📈 18.79 Punkte
🔧 Programmierung

🔧 Fine-Tuning a Pre-Trained Model in PyTorch: A Step-by-Step Guide for Beginners


📈 18.79 Punkte
🔧 Programmierung

🔧 My PyTorch Model in Kaggle uses 100% CPU and 0% GPU During Training


📈 18.79 Punkte
🔧 Programmierung

📰 Diffusion Model from Scratch in Pytorch


📈 18.79 Punkte
🔧 AI Nachrichten

📰 Simple Ways to Speed Up Your PyTorch Model Training


📈 18.79 Punkte
🔧 AI Nachrichten

🔧 Understanding GPT: How To Implement a Simple GPT Model with PyTorch


📈 18.79 Punkte
🔧 Programmierung

🔧 Accelerate Model Training with PyTorch


📈 18.79 Punkte
🔧 Programmierung

📰 Amazon SageMaker model parallel library now accelerates PyTorch FSDP workloads by up to 20%


📈 18.79 Punkte
🔧 AI Nachrichten

⚠️ #0daytoday #PyTorch Model Server Registration / Deserialization Remote Code Execution Exploit [#0day #Exploit]


📈 18.79 Punkte
⚠️ PoC

⚠️ PyTorch Model Server Registration / Deserialization Remote Code Execution


📈 18.79 Punkte
⚠️ PoC

⚠️ PyTorch Model Server Registration / Deserialization Remote Code Execution


📈 18.79 Punkte
⚠️ PoC

🎥 Scaling AI Model Training and Inferencing Efficiently with PyTorch


📈 18.79 Punkte
🎥 Video | Youtube

📰 Handwritten Digit Recognition with LeNet5 Model in PyTorch


📈 18.79 Punkte
🔧 AI Nachrichten

📰 Tesla mit Preiserhöhung in Europa: Neue Model S, Model X, Model 3 kündigen sich an


📈 18.7 Punkte
📰 IT Nachrichten

📰 Tesla: Model 3, Model S und Model X mit mehr Reichweite


📈 18.7 Punkte
📰 IT Nachrichten

📰 Cuphead: Wird für Tesla Model 3, Model S und Model X umgesetzt


📈 18.7 Punkte
📰 IT Nachrichten

📰 ML model registry — the “interface” that binds model experiments and model deployment


📈 18.7 Punkte
🔧 AI Nachrichten

📰 Tesla: Preiserhöhung für Model 3, Model S und Model X in Deutschland


📈 18.7 Punkte
📰 IT Nachrichten

📰 This gadget can help you save drive safer and save money, and now there's 20% off!


📈 15.05 Punkte
📰 IT Nachrichten

🍏 Save on Dollar Flight Club, then save on air travel for the rest of your life


📈 15.05 Punkte
🍏 iOS / Mac OS

📰 Sustainability initiatives won't just save the planet -- they'll save your company, too


📈 15.05 Punkte
📰 IT Nachrichten

📰 Outsourcing Your Software Development During the 2020 Pandemic – Save Work, Save Lives


📈 15.05 Punkte
📰 IT Security Nachrichten

🪟 Save $50 on Ecobee's newest SmartThermostat or save on a budget version


📈 15.05 Punkte
🪟 Windows Tipps

matomo