import torch
import torch.nn as nn

MODEL_SAVE_PATH = "char_lm_model.pth"
SEQ_LENGTH = 32      # unused at inference time; kept for reference
EMBEDDING_DIM = 64   # must match the training configuration,
HIDDEN_DIM = 64      # or load_state_dict below will fail
|
# Rebuild the character vocabulary from the same corpus used for training,
# so the index <-> character mappings line up with the saved weights.
with open('dataset.txt', 'r', encoding='utf-8') as f:
    text = f.read()

chars = sorted(list(set(text)))
vocab_size = len(chars)
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = {i: ch for i, ch in enumerate(chars)}
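# Illustrative example (toy corpus, not dataset.txt): if the file contained
# only "aab\n", then chars == ['\n', 'a', 'b'], vocab_size == 3, and
# char_to_idx == {'\n': 0, 'a': 1, 'b': 2}.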
|
class CharLM(nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, EMBEDDING_DIM)
        self.rnn = nn.GRU(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True)
        self.fc = nn.Linear(HIDDEN_DIM, vocab_size)

    def forward(self, x, hidden=None):
        x = self.embedding(x)              # (batch, seq) -> (batch, seq, EMBEDDING_DIM)
        out, hidden = self.rnn(x, hidden)  # (batch, seq, HIDDEN_DIM)
        out = self.fc(out)                 # per-position logits over the vocabulary
        return out, hidden
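# Illustrative shape check (commented out; `dummy` is a made-up input):
#   dummy = torch.randint(0, vocab_size, (1, 5))  # batch of one 5-char sequence
#   logits, h = CharLM()(dummy)
#   # logits.shape == (1, 5, vocab_size); h.shape == (1, 1, HIDDEN_DIM)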
|
model = CharLM()
# map_location keeps this working on CPU-only machines even if the
# checkpoint was saved from a GPU run.
model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location='cpu'))
model.eval()
|
def generate_text(model, start_str, length=100, temperature=0.7, top_k=0):
    """
    Generate text with temperature scaling and (optional) top-k sampling.
    """
    model.eval()
    chars = list(start_str)
    # Keep only characters the model has seen, so char_to_idx cannot raise
    # a KeyError on out-of-vocabulary input.
    known = [char_to_idx[ch] for ch in start_str if ch in char_to_idx]
    if not known:
        return start_str
    input_seq = torch.tensor(known).unsqueeze(0)
    hidden = None

    with torch.no_grad():
        for _ in range(length):
            outputs, hidden = model(input_seq, hidden)
            # Logits for the final position; temperature < 1 sharpens the
            # distribution, temperature > 1 flattens it.
            logits = outputs[0, -1] / temperature

            if top_k > 0:
                # Mask everything below the k-th largest logit so sampling
                # only ever picks from the top-k candidates.
                top_vals, _ = torch.topk(logits, top_k)
                logits[logits < top_vals[-1]] = -float('Inf')

            probs = torch.softmax(logits, dim=-1)
            next_idx = torch.multinomial(probs, num_samples=1).item()
            chars.append(idx_to_char[next_idx])
            # Feed back only the sampled character; the GRU hidden state
            # carries the earlier context.
            input_seq = torch.tensor([[next_idx]])

    return ''.join(chars)
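# Usage sketch (the prompt "hello" is made up): low temperature with a small
# top_k gives conservative, repetitive text; higher temperature with top_k=0
# (no truncation) gives more varied but noisier text.
#   print(generate_text(model, "hello", length=200, temperature=0.5, top_k=5))
#   print(generate_text(model, "hello", length=200, temperature=1.2, top_k=0))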
|
def chat():
    print("Chat with the model! Type 'exit' to stop.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            break
        # The model simply continues the prompt, so the "response" echoes
        # the user's input followed by the generated continuation.
        response = generate_text(model, user_input, length=100, temperature=0.7, top_k=5)
        print("Bot:", response)


if __name__ == "__main__":
    chat()