import torch
import torch.nn as nn


class LSTMModel(nn.Module):
    def __init__(self, input_size=1, hidden_size=128, output_size=2):
        super(LSTMModel, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size  # stored but unused: the two 1-dim heads below produce the outputs

        # Conv1d front end extracts local features from the raw signal
        self.conv1d = nn.Conv1d(input_size, 64, kernel_size=5, padding=2)
        self.relu = nn.ReLU()

        # Stacked LSTMs: two bidirectional layers, then one unidirectional layer.
        # Bidirectional layers output hidden_size * 2 features, hence the input sizes below.
        self.lstm1 = nn.LSTM(64, hidden_size, bidirectional=True, batch_first=True)
        self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm3 = nn.LSTM(hidden_size * 2, 64, bidirectional=False, batch_first=True)

        # Fully connected layers shared by both output heads
        self.fc1 = nn.Linear(64, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)

        # Separate linear heads for systolic (SBP) and diastolic (DBP) blood pressure
        self.fc_sbp = nn.Linear(128, 1)
        self.fc_dbp = nn.Linear(128, 1)

    def forward(self, x):
        # Pass the input through the Conv1d layer:
        # (batch, seq_len, channels) -> (batch, channels, seq_len), as Conv1d expects
        x = self.conv1d(x.permute(0, 2, 1).contiguous())
        x = self.relu(x)
        # Back to (batch, seq_len, channels) for the batch_first LSTMs
        x = x.permute(0, 2, 1).contiguous()

        # Pass through the stacked LSTM layers
        x, _ = self.lstm1(x)
        x, _ = self.lstm2(x)
        x, _ = self.lstm3(x)

        # Use only the output of the last time step
        x = x[:, -1, :]

        # Pass the LSTM output through the fully connected layers
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.relu(self.fc3(x))

        # Produce the two final outputs from the two linear heads
        sbp = self.fc_sbp(x)
        dbp = self.fc_dbp(x)
        return sbp, dbp


if __name__ == "__main__":
    # Create a model instance
    model = LSTMModel()

    # Define an example input
    batch_size = 64
    seq_len = 1250
    input_size = 1
    input_data = torch.randn(batch_size, seq_len, input_size)

    # Pass the input data through the model
    sbp, dbp = model(input_data)
    print(sbp.shape, dbp.shape)  # Output: torch.Size([64, 1]) torch.Size([64, 1])
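

# --- Illustrative training sketch (not part of the original code) ---
# A minimal sketch of how the two heads might be trained jointly, assuming MSE
# regression on each head with an Adam optimizer. The synthetic targets, batch
# size, learning rate, and step count are placeholders for shape checking only,
# not values taken from the original. Call train_demo(LSTMModel()) to run it.
def train_demo(model, num_steps=5, lr=1e-3):
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    model.train()
    for step in range(num_steps):
        # Synthetic batch: random signals and random SBP/DBP targets (hypothetical data)
        signals = torch.randn(8, 1250, 1)
        sbp_target = torch.randn(8, 1)
        dbp_target = torch.randn(8, 1)

        optimizer.zero_grad()
        sbp_pred, dbp_pred = model(signals)
        # Joint loss: sum of the per-head regression losses
        loss = criterion(sbp_pred, sbp_target) + criterion(dbp_pred, dbp_target)
        loss.backward()
        optimizer.step()
        print(f"step {step}: loss={loss.item():.4f}")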