""" |
|
|
Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. |
|
|
""" |

import hydra
import ray
import torch
from split_monkey_patch import fit

from verl import DataProto
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.utils.reward_score import gsm8k, math


def _select_rm_score_fn(data_source):
    """Select the rule-based reward function for a given data source."""
    if data_source == "openai/gsm8k":
        return gsm8k.compute_score
    elif data_source == "lighteval/MATH":
        return math.compute_score
    else:
        raise NotImplementedError(f"No rule-based reward function registered for {data_source}")
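
# Extension sketch (hypothetical names): another dataset can be routed the same way,
#   elif data_source == "my_org/my_dataset":
#       return my_dataset.compute_score
# as long as the scorer exposes compute_score(solution_str=..., ground_truth=...).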


class RewardManager:
    """Scores rollouts with rule-based reward functions selected per data source."""

    def __init__(self, tokenizer, num_examine) -> None:
        self.tokenizer = tokenizer
        # Number of decoded sequences to print per data source, for inspection.
        self.num_examine = num_examine

    def __call__(self, data: DataProto, return_dict: bool = False):
        """We will expand this function gradually based on the available datasets."""

        # If a reward model has already scored the batch, return those scores directly.
        if "rm_scores" in data.batch.keys():
            if return_dict:
                return {"reward_tensor": data.batch["rm_scores"]}
            return data.batch["rm_scores"]

        # Sparse reward: one scalar per sequence, placed at its last valid response token.
        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)

        already_print_data_sources = {}

        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            # Strip padding: prompts are left-padded, responses are right-padded.
            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]
            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # Decode the full prompt + response for the rule-based scorer.
            sequences = torch.cat((valid_prompt_ids, valid_response_ids))
            sequences_str = self.tokenizer.decode(sequences)

            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]

            # Select the scoring function based on where the example came from.
            data_source = data_item.non_tensor_batch["data_source"]
            compute_score_fn = _select_rm_score_fn(data_source)

            score = compute_score_fn(solution_str=sequences_str, ground_truth=ground_truth)
            reward_tensor[i, valid_response_length - 1] = score

            # Print up to `num_examine` decoded sequences per data source for inspection.
            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print(sequences_str)

        if return_dict:
            return {"reward_tensor": reward_tensor}
        else:
            return reward_tensor
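
# Usage sketch (illustrative; `data` is the DataProto batch produced by the rollout workers):
#   reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0)
#   reward_tensor = reward_fn(data)  # zero everywhere except each last response token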


@hydra.main(config_path="config", config_name="ppo_trainer_split", version_base=None)
def main(config):
    if not ray.is_initialized():
        # Start a local Ray cluster if the driver is not already attached to one.
        ray.init(
            runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}},
            num_cpus=config.ray_init.num_cpus,
        )

    ray.get(main_task.remote(config))


@ray.remote
def main_task(config):
    from pprint import pprint

    from omegaconf import OmegaConf

    from verl.utils.fs import copy_to_local

    # Print the config with interpolations resolved, then resolve it in place.
    pprint(OmegaConf.to_container(config, resolve=True))
    OmegaConf.resolve(config)

    # Copy the model checkpoint to local storage in case it lives on a remote filesystem.
    local_path = copy_to_local(config.actor_rollout_ref.model.path)

    # Instantiate the tokenizer from the local checkpoint.
    from verl.utils import hf_tokenizer

    tokenizer = hf_tokenizer(local_path)

    # Define worker classes and the worker-group class based on the actor strategy.
    if config.actor_rollout_ref.actor.strategy == "fsdp":
        assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
        from verl.single_controller.ray import RayWorkerGroup
        from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker

        ray_worker_group_cls = RayWorkerGroup

    elif config.actor_rollout_ref.actor.strategy == "megatron":
        assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
        from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup
        from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker

        ray_worker_group_cls = NVMegatronRayWorkerGroup

    else:
        raise NotImplementedError

    from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role

    role_worker_mapping = {
        Role.ActorRollout: ray.remote(ActorRolloutRefWorker),
        Role.Critic: ray.remote(CriticWorker),
    }

    # NOTE: two resource pools so that the actor/rollout/ref workers and the critic
    # (plus the optional reward model) are placed on disjoint sets of GPUs.
    actor_rollout_ref_pool_id = "actor_rollout_ref_pool"
    critic_pool_id = "critic_pool"
    if config.trainer.nnodes // 2 == 0 and config.trainer.n_gpus_per_node // 2 > 0:
        # Single node: split the GPUs of that node between the two pools.
        resource_pool_spec = {
            actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
            critic_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
        }
    else:
        # Multiple nodes: give each pool half of the nodes, with all of their GPUs.
        resource_pool_spec = {
            actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
            critic_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
        }
    print(f"resource_pool_spec: {resource_pool_spec}")

    mapping = {
        Role.ActorRollout: actor_rollout_ref_pool_id,
        Role.Critic: critic_pool_id,
    }

    # The reference policy is only needed when a KL term is applied (in the reward or as
    # a loss); it shares the actor/rollout pool rather than claiming extra GPUs.
    if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss:
        role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker)
        mapping[Role.RefPolicy] = actor_rollout_ref_pool_id

    # Optionally add a model-based reward worker; it shares GPUs with the critic pool.
    if config.reward_model.enable:
        if config.reward_model.strategy == "fsdp":
            from verl.workers.fsdp_workers import RewardModelWorker
        elif config.reward_model.strategy == "megatron":
            from verl.workers.megatron_workers import RewardModelWorker
        else:
            raise NotImplementedError
        role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
        mapping[Role.RewardModel] = critic_pool_id

    # Rule-based rewards; during validation, print one decoded example per data source.
    reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0)
    val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1)

    resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

    # Monkey-patch RayPPOTrainer.fit with the split-placement training loop before use.
    RayPPOTrainer.fit = fit
    trainer = RayPPOTrainer(
        config=config,
        tokenizer=tokenizer,
        role_worker_mapping=role_worker_mapping,
        resource_pool_manager=resource_pool_manager,
        ray_worker_group_cls=ray_worker_group_cls,
        reward_fn=reward_fn,
        val_reward_fn=val_reward_fn,
        device_name=config.trainer.device,
    )
    trainer.init_workers()
    trainer.fit()


if __name__ == "__main__":
    main()