# LSTM (Long Short-Term Memory) sine-wave prediction demo

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# ===== Data generation =====
# Sample sin(x) at 1000 evenly spaced points over [0, 100].
x = np.linspace(0, 100, 1000)
y = np.sin(x)

# Build sliding-window training pairs: each input is `seq_length`
# consecutive samples; the target is the sample that follows.
seq_length = 20
n_samples = len(y) - seq_length
X = np.array([y[i:i + seq_length] for i in range(n_samples)])
Y = np.array([y[i + seq_length] for i in range(n_samples)])

X_train = torch.tensor(X, dtype=torch.float32).unsqueeze(-1)  # (batch_size, seq_length, input_size)
Y_train = torch.tensor(Y, dtype=torch.float32).unsqueeze(-1)  # (batch_size, output_size)

# ===== LSTM model definition =====
class LSTMModel(nn.Module):
    """Single-layer LSTM followed by a linear head for one-step-ahead prediction."""

    def __init__(self, input_size=1, hidden_size=50, output_size=1):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, seq, input_size) tensor to a (batch, output_size) prediction."""
        sequence_out, _ = self.lstm(x)
        last_step = sequence_out[:, -1, :]  # hidden state at the final time step
        return self.fc(last_step)

# ===== Model, loss, and optimizer =====
model = LSTMModel()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# ===== Training (full-batch: the whole dataset is one gradient step per epoch) =====
epochs = 10
for epoch in range(epochs):
    predictions = model(X_train)
    loss = criterion(predictions, Y_train)

    optimizer.zero_grad()  # clear stale gradients before the backward pass
    loss.backward()
    optimizer.step()

    print(f'Epoch [{epoch + 1}], Loss: {loss.item():.4f}')

# ===== Prediction =====
model.eval()  # inference mode (no-op for this architecture, but good practice)
with torch.no_grad():  # skip autograd bookkeeping during inference
    preds = model(X_train).numpy()

# ===== Plot ground truth against model output =====
for series, name in ((Y, 'True'), (preds, 'Predicted')):
    plt.plot(series, label=name)
plt.legend()
plt.show()

# Example run:
#   $ python3 lstm.py
#   Epoch [1], Loss: 0.4882
#   Epoch [2], Loss: 0.4804
#   Epoch [3], Loss: 0.4726
#   Epoch [4], Loss: 0.4648
#   Epoch [5], Loss: 0.4570
#   Epoch [6], Loss: 0.4492
#   Epoch [7], Loss: 0.4414
#   Epoch [8], Loss: 0.4335
#   Epoch [9], Loss: 0.4254
#   Epoch [10], Loss: 0.4172