Update README.md

README.md CHANGED

@@ -19,8 +19,35 @@ It is a single dictionary of subspaces for 16K concepts and serves as a drop-in
# 3. How can I use these dictionaries straight away?

```python
+import torch
+from huggingface_hub import hf_hub_download
import pyvene as pv

+# Create an intervention.
+class Encoder(pv.CollectIntervention):
+    """An intervention that reads concept latents from model streams."""
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs, keep_last_dim=True)
+        self.proj = torch.nn.Linear(
+            self.embed_dim, kwargs["latent_dim"], bias=False)
+    def forward(self, base, source=None, subspaces=None):
+        return torch.relu(self.proj(base))
+
+# Load the dictionary weights.
+path_to_params = hf_hub_download(repo_id="pyvene/gemma-reft-2b-it-res", filename="l20/weight.pt")
+params = torch.load(path_to_params).cuda()
+encoder = Encoder(embed_dim=params.shape[0], latent_dim=params.shape[1])
+encoder.proj.weight.data = params.float()
+
+# Mount the loaded intervention on layer 20 of the model.
+pv_model = pv.IntervenableModel({
+    "component": "model.layers[20].output",
+    "intervention": encoder}, model=model)
+
+# Use pv_model just like any other torch model; the forward pass collects the subspace latents.
+prompt = "Would you be able to travel through time using a wormhole?"
+input_ids = torch.tensor([tokenizer.apply_chat_template(
+    [{"role": "user", "content": prompt}], tokenize=True, add_generation_prompt=True)]).cuda()
+acts = pv_model.forward(
+    {"input_ids": input_ids}, return_dict=True).collected_activations[0]
```

# 4. Point of Contact
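
The snippet added above assumes that `model` and `tokenizer` already exist in the session; they are not defined anywhere in this diff. A minimal setup sketch, assuming the dictionary targets `google/gemma-2-2b-it` (inferred from the repository name, so treat the model name as an assumption) and that a CUDA device is available:

```python
# Setup sketch (not part of the commit): load the assumed base model and tokenizer.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "google/gemma-2-2b-it"  # assumed base model for this dictionary
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).cuda().eval()
```

With that in place, the README snippet runs as written, and `acts` should hold the concept latents collected at `model.layers[20].output` for the prompt.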