# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import warnings
from contextlib import contextmanager
from importlib.metadata import version
from typing import Any, Callable, ContextManager, Optional
import numpy as np
import torch
import torch.distributed as dist
try:
# NPU patch
import mindspeed.megatron_adaptor # noqa: F401
from mindspeed.megatron_adaptor import repatch
except ImportError:
repatch = None
pass
from accelerate import init_empty_weights
from megatron.core import dist_checkpointing
from megatron.core import parallel_state as mpu
from megatron.core.dist_checkpointing.mapping import ShardedTensor
from megatron.core.dist_checkpointing.serialization import StrictHandling
from megatron.core.models.gpt.gpt_model import ModelType
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from packaging.version import Version
from transformers import AutoConfig
from verl.model_merger.megatron_model_merger import get_dynamic_pipeline_shards
from verl.models.mcore import hf_to_mcore_config
from verl.utils.device import get_device_name, get_torch_device
from verl.utils.megatron_utils import get_model
def _init_args():
"""
Examples:
1. single rank conversion for any model:
> python converter_hf_to_mcore.py --hf_model_path ${hf_model} --output_path ${output_path}
2. distributed conversion for DeepseekV3 671B:
> torchrun --nproc_per_node 1 --nnodes 4 --node_rank ${RANK} converter_hf_to_mcore.py \
--hf_model_path ${hf_model} --output_path ${output_path}
"""
parser = argparse.ArgumentParser()
parser.add_argument("--hf_model_path", type=str, required=True, help="The path for the huggingface model")
parser.add_argument("--output_path", type=str, required=True, help="The path for the output mcore model")
parser.add_argument("--pp_size", type=int, default=1, help="pipeline model parallel size")
parser.add_argument("--ep_size", type=int, default=1, help="expert model parallel size")
parser.add_argument("--use_cpu_initialization", action="store_true", help="Whether to use cpu initialization")
parser.add_argument("--test", action="store_true", help="Whether to test the conversion")
parser.add_argument("--trust_remote_code", action="store_true", help="Whether to trust remote code")
args = parser.parse_args()
return args
def test_conversion(megatron_model_provider, tfconfig, output_path, model):
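"""Reload the checkpoint saved at `output_path` into a freshly built model and compare every
tensor against the in-memory `model` to verify that the conversion round-trips."""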
########### test ###########
# load model
model_test = get_model(
model_provider_func=megatron_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=True,
transformer_config=tfconfig,
)
ref_state_dict = model_test[0].module.sharded_state_dict()
dist_checkpointing.load(ref_state_dict, output_path, strict=StrictHandling.ASSUME_OK_UNEXPECTED)
dut_state_dict = model[0].module.state_dict()
for name in dut_state_dict.keys():
if dut_state_dict[name] is None:
print(f"[Warning] {name} is none in dut_state_dict")
continue
dut_data = dut_state_dict[name].data
if name in ref_state_dict:
ref_data = ref_state_dict[name]
if isinstance(ref_data, ShardedTensor):
ref_data = ref_data.data.view(ref_data.local_shape)
else:
ref_data = ref_data.data
assert dut_data.shape == ref_data.shape, f"{name=} {dut_data.shape=} {ref_data.shape=}"
assert (dut_data == ref_data).all(), f"{name} is not equal"
print(f"{name} is equal")
else:
print(f"[Warning] {name} is not in ref_state_dict")
for name in ref_state_dict.keys():
if ref_state_dict[name] is None:
print(f"[Warning] {name} is none in ref_state_dict")
continue
ref_data = ref_state_dict[name]
if isinstance(ref_data, ShardedTensor):
ref_data = ref_data.data.view(ref_data.local_shape)
else:
ref_data = ref_data.data
if name in dut_state_dict:
dut_data = dut_state_dict[name].data
assert dut_data.shape == ref_data.shape, f"{name=} {dut_data.shape=} {ref_data.shape=}"
assert (dut_data == ref_data).all(), f"{name} is not equal"
print(f"{name} is equal")
else:
print(f"[Warning] {name} is not in dut_state_dict")
print("Conversion test passed!")
@torch.inference_mode()
def convert_checkpoint_from_transformers_to_megatron(
hf_model, model, hf_config, layer_start_end: Optional[tuple[int, int]] = None
):
if layer_start_end is None:
layer_start_end = (0, len(model.decoder.layers))
layer_start, layer_end = layer_start_end
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
ep_rank = mpu.get_expert_model_parallel_rank()
ep_size = mpu.get_expert_model_parallel_world_size()
numel = 0
num_attention_heads = hf_config.num_attention_heads
num_key_value_heads = hf_config.num_key_value_heads
hidden_dim = hf_config.hidden_size
head_dim = getattr(hf_config, "head_dim", hidden_dim // num_attention_heads)
if num_attention_heads != num_key_value_heads:
print("[WARNING] Converting GQA model")
has_qkv_bias = getattr(hf_config, "qkv_bias", False) or getattr(hf_config, "attention_bias", False)
has_share_expert = getattr(hf_config, "shared_expert_intermediate_size", None)
if pp_rank == 0:
numel += safe_copy(hf_model.model.embed_tokens.weight, model.embedding.word_embeddings.weight)
assert len(model.decoder.layers) == (layer_end - layer_start), (
f"Expected {len(model.decoder.layers)} layers, but got {layer_end - layer_start}"
)
for layer_idx, (layer, hf_layer) in enumerate(
zip(model.decoder.layers, hf_model.model.layers[layer_start:layer_end], strict=True)
):
global_layer_idx = layer_idx + layer_start
numel_cur = numel
numel += safe_copy(hf_layer.input_layernorm.weight, layer.self_attention.linear_qkv.layer_norm_weight)
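# Interleave Q, K and V per key/value group: for each of the num_key_value_heads groups, stack
# that group's query-head rows followed by its single K and V head so the fused tensor matches
# Megatron's linear_qkv weight layout.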
q = hf_layer.self_attn.q_proj.weight.view(
[num_key_value_heads, head_dim * num_attention_heads // num_key_value_heads, -1]
)
k = hf_layer.self_attn.k_proj.weight.view([num_key_value_heads, head_dim, -1])
v = hf_layer.self_attn.v_proj.weight.view([num_key_value_heads, head_dim, -1])
qkv = torch.cat([q, k, v], dim=1).view(-1, hidden_dim).contiguous()
numel += safe_copy(qkv, layer.self_attention.linear_qkv.weight)
if has_qkv_bias:
q_bias = hf_layer.self_attn.q_proj.bias.view([num_key_value_heads, -1])
k_bias = hf_layer.self_attn.k_proj.bias.view([num_key_value_heads, -1])
v_bias = hf_layer.self_attn.v_proj.bias.view([num_key_value_heads, -1])
qkv_bias = torch.cat([q_bias, k_bias, v_bias], dim=1).view(-1).contiguous()
numel += safe_copy(qkv_bias, layer.self_attention.linear_qkv.bias)
if hasattr(hf_layer.self_attn, "q_norm"):
numel += safe_copy(hf_layer.self_attn.q_norm.weight.data, layer.self_attention.q_layernorm.weight)
numel += safe_copy(hf_layer.self_attn.k_norm.weight.data, layer.self_attention.k_layernorm.weight)
numel += safe_copy(hf_layer.self_attn.o_proj.weight, layer.self_attention.linear_proj.weight)
numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.pre_mlp_layernorm.weight)
numel += safe_copy(hf_layer.mlp.gate.weight, layer.mlp.router.weight)
for idx, hf_expert in enumerate(hf_layer.mlp.experts):
num_experts = len(hf_layer.mlp.experts)
num_local_experts = num_experts // ep_size
expert_idx_start = ep_rank * num_local_experts
expert_idx_end = (ep_rank + 1) * num_local_experts
if idx < expert_idx_start or idx >= expert_idx_end:
continue
local_expert_idx = idx - expert_idx_start
fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight])
numel += safe_copy(fc1_weight, layer.mlp.experts.linear_fc1._parameters[f"weight{local_expert_idx}"])
numel += safe_copy(
hf_expert.down_proj.weight, layer.mlp.experts.linear_fc2._parameters[f"weight{local_expert_idx}"]
)
if has_share_expert:
numel += safe_copy(hf_layer.mlp.shared_expert_gate.weight, layer.mlp.shared_experts.gate_weight)
shared_fc1_weight = torch.cat(
[hf_layer.mlp.shared_expert.gate_proj.weight, hf_layer.mlp.shared_expert.up_proj.weight]
)
numel += safe_copy(shared_fc1_weight, layer.mlp.shared_experts.linear_fc1.weight)
numel += safe_copy(hf_layer.mlp.shared_expert.down_proj.weight, layer.mlp.shared_experts.linear_fc2.weight)
print(f"{pp_rank=} {global_layer_idx=} {layer_idx=} {numel=} numel this layer={numel - numel_cur}")
if pp_rank == pp_size - 1:
numel += safe_copy(hf_model.model.norm.weight, model.decoder.final_layernorm.weight)
numel += safe_copy(hf_model.lm_head.weight, model.output_layer.weight)
return numel
def safe_copy(
src_tensor: torch.Tensor,
dst_tensor: torch.Tensor,
skip_dtype_assert: bool = False,
):
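"""Copy `src_tensor` into `dst_tensor` in place, asserting that shapes (and dtypes, unless
`skip_dtype_assert` is set) match, and return the number of elements copied."""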
if not skip_dtype_assert:
if src_tensor.dtype != dst_tensor.dtype:
raise ValueError(f"Get source dtype {src_tensor.dtype}, but target dtype {dst_tensor.dtype}")
assert src_tensor.shape == dst_tensor.shape
dst_tensor.data.copy_(src_tensor.data)
return src_tensor.numel()
@torch.inference_mode()
def convert_checkpoint_from_transformers_to_megatron_qwen2_5_vl(hfmodel, mgmodel, hf_config):
mgmodel = mgmodel.bfloat16()
hfmodel = hfmodel.bfloat16()
num_attention_heads = hf_config.num_attention_heads
num_query_groups = hf_config.num_key_value_heads
hidden_size = hf_config.hidden_size
head_dim = hidden_size // num_attention_heads
# 1. vision model
if Version(version("transformers")) < Version("4.52.0"):
print("Using transformers < 4.52 API to load vision model")
hfvision = hfmodel.visual
else:
hfvision = hfmodel.model.visual
mgvision = mgmodel.vision_model
vision_hidden_size = mgvision.config.hidden_size
vision_num_query_groups = mgvision.config.num_query_groups
vision_head_dim = vision_hidden_size // mgvision.config.num_attention_heads
copied_numel = 0
safe_copy(hfvision.rotary_pos_emb.inv_freq, mgvision.rotary_pos_emb.inv_freq)
copied_numel += safe_copy(hfvision.patch_embed.proj.weight, mgvision.patch_embed.proj.weight)
for hfblock, mgblock in zip(hfvision.blocks, mgvision.decoder.layers, strict=True):
# norm1 --> linear_qkv.norm
copied_numel += safe_copy(hfblock.norm1.weight, mgblock.self_attention.linear_qkv.layer_norm_weight)
# norm2 --> mlp.linear_fc1.norm
copied_numel += safe_copy(hfblock.norm2.weight, mgblock.mlp.linear_fc1.layer_norm_weight)
# qkv --> self_attention.linear_qkv
converted_weight = (
hfblock.attn.qkv.weight.view(3, vision_num_query_groups, -1, vision_head_dim, vision_hidden_size)
.transpose(0, 1)
.flatten(1, 2)
.reshape(-1, vision_hidden_size)
.contiguous()
)
copied_numel += safe_copy(converted_weight, mgblock.self_attention.linear_qkv.weight)
converted_bias = (
hfblock.attn.qkv.bias.view(3, vision_num_query_groups, -1)
.transpose(0, 1)
.flatten(1, 2)
.view(-1)
.contiguous()
)
copied_numel += safe_copy(converted_bias, mgblock.self_attention.linear_qkv.bias)
# proj --> self_attention.linear_proj
copied_numel += safe_copy(hfblock.attn.proj.weight, mgblock.self_attention.linear_proj.weight)
copied_numel += safe_copy(hfblock.attn.proj.bias, mgblock.self_attention.linear_proj.bias)
# mlp --> mlp: gate
fc1_weight = torch.cat([hfblock.mlp.gate_proj.weight, hfblock.mlp.up_proj.weight])
fc1_bias = torch.cat([hfblock.mlp.gate_proj.bias, hfblock.mlp.up_proj.bias])
copied_numel += safe_copy(fc1_weight, mgblock.mlp.linear_fc1.weight)
copied_numel += safe_copy(fc1_bias, mgblock.mlp.linear_fc1.bias)
copied_numel += safe_copy(hfblock.mlp.down_proj.weight, mgblock.mlp.linear_fc2.weight)
copied_numel += safe_copy(hfblock.mlp.down_proj.bias, mgblock.mlp.linear_fc2.bias)
# 2. vision projector
hfprojector = hfvision.merger
mgprojector = mgvision.projection
copied_numel += safe_copy(hfprojector.ln_q.weight, mgvision.decoder.final_layernorm.weight)
copied_numel += safe_copy(hfprojector.mlp[0].weight, mgprojector.encoder.linear_fc1.weight)
copied_numel += safe_copy(hfprojector.mlp[0].bias, mgprojector.encoder.linear_fc1.bias)
copied_numel += safe_copy(hfprojector.mlp[2].weight, mgprojector.encoder.linear_fc2.weight)
copied_numel += safe_copy(hfprojector.mlp[2].bias, mgprojector.encoder.linear_fc2.bias)
n_params = sum([t.numel() for t in hfvision.state_dict().values()])
assert n_params == copied_numel, f"n_params={n_params} != copied_numel={copied_numel}"
# 3. llm [just Qwen2]
if Version(version("transformers")) < Version("4.52.0"):
print("Using transformers < 4.52 API to load llm")
hfllm = hfmodel.model
else:
hfllm = hfmodel.model.language_model
mgllm = mgmodel.language_model
copied_numel = 0
copied_numel += safe_copy(hfllm.embed_tokens.weight, mgllm.embedding.word_embeddings.weight)
layermaps = zip(mgllm.decoder.layers, hfllm.layers, strict=True)
for mglayer, hflayer in layermaps:
copied_numel += safe_copy(hflayer.input_layernorm.weight, mglayer.self_attention.linear_qkv.layer_norm_weight)
q_proj_weight = hflayer.self_attn.q_proj.weight.view(num_query_groups, -1, head_dim, hidden_size)
k_proj_weight = hflayer.self_attn.k_proj.weight.view(num_query_groups, -1, head_dim, hidden_size)
v_proj_weight = hflayer.self_attn.v_proj.weight.view(num_query_groups, -1, head_dim, hidden_size)
qkv_proj = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=1).view(-1, hidden_size).contiguous()
copied_numel += safe_copy(qkv_proj, mglayer.self_attention.linear_qkv.weight)
q_proj_bias = hflayer.self_attn.q_proj.bias.view(num_query_groups, -1)
k_proj_bias = hflayer.self_attn.k_proj.bias.view(num_query_groups, -1)
v_proj_bias = hflayer.self_attn.v_proj.bias.view(num_query_groups, -1)
qkv_bias = torch.cat([q_proj_bias, k_proj_bias, v_proj_bias], dim=1).view(-1).contiguous()
copied_numel += safe_copy(qkv_bias, mglayer.self_attention.linear_qkv.bias)
copied_numel += safe_copy(hflayer.self_attn.o_proj.weight, mglayer.self_attention.linear_proj.weight)
fc1_weight = torch.cat([hflayer.mlp.gate_proj.weight, hflayer.mlp.up_proj.weight])
copied_numel += safe_copy(fc1_weight, mglayer.mlp.linear_fc1.weight)
copied_numel += safe_copy(hflayer.mlp.down_proj.weight, mglayer.mlp.linear_fc2.weight)
copied_numel += safe_copy(hflayer.post_attention_layernorm.weight, mglayer.mlp.linear_fc1.layer_norm_weight)
copied_numel += safe_copy(hfllm.norm.weight, mgllm.decoder.final_layernorm.weight)
if not hf_config.tie_word_embeddings:
safe_copy(hfmodel.lm_head.weight, mgllm.output_layer.weight)
n_params = sum([t.numel() for t in hfllm.state_dict().values()])
assert n_params == copied_numel, f"n_params={n_params} != copied_numel={copied_numel}"
@torch.inference_mode()
def convert_checkpoint_from_transformers_to_megatron_dpskv3(
hf_model,
model,
hf_config,
tfconfig,
layer_start_end: Optional[tuple[int, int]] = None,
):
warnings.warn("MTP model is not supported yet", stacklevel=2)
if layer_start_end is None:
layer_start_end = (0, len(model.decoder.layers))
layer_start, layer_end = layer_start_end
numel: int = 0
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
ep_rank = mpu.get_expert_model_parallel_rank()
ep_size = mpu.get_expert_model_parallel_world_size()
if pp_rank == 0:
numel += safe_copy(hf_model.model.embed_tokens.weight, model.embedding.word_embeddings.weight)
assert len(model.decoder.layers) == (layer_end - layer_start), (
f"Expected {len(model.decoder.layers)} layers, but got {layer_end - layer_start}"
)
for layer_idx, (layer, hf_layer) in enumerate(
zip(model.decoder.layers, hf_model.model.layers[layer_start:layer_end], strict=True)
):
global_layer_idx = layer_idx + layer_start
numel_cur: int = numel
numel += safe_copy(hf_layer.input_layernorm.weight, layer.input_layernorm.weight)
if hf_config.q_lora_rank is None:
numel += safe_copy(hf_layer.self_attn.q_proj.weight, layer.self_attention.linear_q_proj.weight)
else:
numel += safe_copy(hf_layer.self_attn.q_a_proj.weight, layer.self_attention.linear_q_down_proj.weight)
numel += safe_copy(hf_layer.self_attn.q_b_proj.weight, layer.self_attention.linear_q_up_proj.weight)
numel += safe_copy(
hf_layer.self_attn.q_a_layernorm.weight, layer.self_attention.linear_q_up_proj.layer_norm_weight
)
numel += safe_copy(
hf_layer.self_attn.kv_a_proj_with_mqa.weight, layer.self_attention.linear_kv_down_proj.weight
)
numel += safe_copy(hf_layer.self_attn.kv_b_proj.weight, layer.self_attention.linear_kv_up_proj.weight)
numel += safe_copy(
hf_layer.self_attn.kv_a_layernorm.weight, layer.self_attention.linear_kv_up_proj.layer_norm_weight
)
numel += safe_copy(hf_layer.self_attn.o_proj.weight, layer.self_attention.linear_proj.weight)
if not hasattr(layer.mlp, "router"):
numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.mlp.linear_fc1.layer_norm_weight)
numel += safe_copy(
torch.cat([hf_layer.mlp.gate_proj.weight, hf_layer.mlp.up_proj.weight]), layer.mlp.linear_fc1.weight
)
numel += safe_copy(hf_layer.mlp.down_proj.weight, layer.mlp.linear_fc2.weight)
else:
numel += safe_copy(hf_layer.mlp.gate.weight, layer.mlp.router.weight)
# NOTE: the e_score_correction_bias in the mcore model is initialized in bfloat16 and
# recovered to fp32 in the first forward pass, so there is always a small diff (~0.3%) in this bias between the two models.
numel += safe_copy(
hf_layer.mlp.gate.e_score_correction_bias, layer.mlp.router.expert_bias, skip_dtype_assert=True
)
if tfconfig.moe_grouped_gemm:
for i, hf_expert in enumerate(hf_layer.mlp.experts):
num_experts = len(hf_layer.mlp.experts)
num_local_experts = num_experts // ep_size
expert_idx_start = ep_rank * num_local_experts
expert_idx_end = (ep_rank + 1) * num_local_experts
if i < expert_idx_start or i >= expert_idx_end:
continue
local_expert_idx = i - expert_idx_start
fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight])
linear_fc1_weighti = getattr(layer.mlp.experts.linear_fc1, "weight" + str(local_expert_idx))
numel += safe_copy(fc1_weight, linear_fc1_weighti)
linear_fc2_weighti = getattr(layer.mlp.experts.linear_fc2, "weight" + str(local_expert_idx))
numel_w2 = safe_copy(hf_expert.down_proj.weight, linear_fc2_weighti)
numel += numel_w2
else:
for i, hf_expert in enumerate(hf_layer.mlp.experts):
expert = layer.mlp.experts.local_experts[i]
fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight])
numel += safe_copy(fc1_weight, expert.linear_fc1.weight)
numel += safe_copy(hf_expert.down_proj.weight, expert.linear_fc2.weight)
numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.pre_mlp_layernorm.weight)
shared_fc1_weight = torch.cat(
[hf_layer.mlp.shared_experts.gate_proj.weight, hf_layer.mlp.shared_experts.up_proj.weight]
)
numel += safe_copy(shared_fc1_weight, layer.mlp.shared_experts.linear_fc1.weight)
numel += safe_copy(hf_layer.mlp.shared_experts.down_proj.weight, layer.mlp.shared_experts.linear_fc2.weight)
print(f"{pp_rank=} {global_layer_idx=} {layer_idx=} {numel=} numel this layer={numel - numel_cur}")
numel_hf_one_layer = sum([i.numel() for i in hf_layer.state_dict().values()])
if hasattr(layer.mlp, "router"):
numel_hf_one_layer -= numel_w2 * 3 * len(hf_layer.mlp.experts) // ep_size * (ep_size - 1)
assert numel - numel_cur == numel_hf_one_layer, "numel mismatch"
if pp_rank == pp_size - 1:
numel += safe_copy(hf_model.model.norm.weight, model.decoder.final_layernorm.weight)
if not hf_config.tie_word_embeddings:
numel += safe_copy(hf_model.lm_head.weight, model.output_layer.weight)
print(f"{pp_rank=} {numel=}")
return numel
@contextmanager
def noop_context() -> Any:
yield
def support_distributed_convert(hf_config: AutoConfig) -> bool:
for arch in ["DeepseekV3ForCausalLM", "Qwen3MoeForCausalLM", "Qwen2MoeForCausalLM"]:
if arch in hf_config.architectures:
return True
return False
def convert_hf_to_mcore(
hf_model_path, output_path, pp_size=1, ep_size=1, use_cpu_initialization=False, test=False, trust_remote_code=False
):
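"""Convert the Hugging Face checkpoint at `hf_model_path` into a Megatron-Core distributed
checkpoint at `output_path`, optionally sharded over `pp_size` pipeline and `ep_size` expert ranks."""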
os.makedirs(output_path, exist_ok=True)
if len(os.listdir(output_path)) > 0 and not test:
print(f"Output path {output_path} is not empty, skipping conversion")
return
# init torch distributed and mpu
if "WORLD_SIZE" not in os.environ:
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("nccl")
local_rank = os.getenv("LOCAL_RANK", 0)
world_size = dist.get_world_size()
get_torch_device().set_device(f"{get_device_name()}:{local_rank}")
if ep_size * pp_size != world_size:
pp_size = world_size
print(f"pp_size is set to {pp_size}")
mpu.initialize_model_parallel(
tensor_model_parallel_size=1,
pipeline_model_parallel_size=pp_size,
virtual_pipeline_model_parallel_size=None,
context_parallel_size=1,
expert_model_parallel_size=ep_size,
)
model_parallel_cuda_manual_seed(0)
# init hf config
hf_config = AutoConfig.from_pretrained(hf_model_path, trust_remote_code=trust_remote_code)
print(hf_config, flush=True)
if repatch:
if hf_config.architectures[0] == "DeepseekV3ForCausalLM":
config_repatch = dict(multi_head_latent_attention=True)
repatch(config_repatch)
if world_size > 1 and not support_distributed_convert(hf_config):
raise NotImplementedError(f"distributed conversion is not supported for {hf_config.architectures} yet.")
pipeline_shards = get_dynamic_pipeline_shards(hf_config.num_hidden_layers, pp_size)
print(f"Pipeline shards: {pipeline_shards}", flush=True)
tfconfig = hf_to_mcore_config(
hf_config,
torch.bfloat16,
num_layers_in_first_pipeline_stage=pipeline_shards[0] if len(pipeline_shards) > 1 else None,
num_layers_in_last_pipeline_stage=pipeline_shards[-1] if len(pipeline_shards) > 2 else None,
)
tfconfig.use_cpu_initialization = use_cpu_initialization
tie_word_embeddings = getattr(hf_config, "tie_word_embeddings", False)
# init megatron model
def megatron_model_provider(pre_process, post_process):
from verl.models.mcore import init_mcore_model
parallel_model = init_mcore_model(
tfconfig,
hf_config,
pre_process,
post_process,
share_embeddings_and_output_weights=tie_word_embeddings,
value=False,
)
return parallel_model
context: Callable[..., ContextManager] = init_empty_weights if use_cpu_initialization else noop_context
with context():
model = get_model(
model_provider_func=megatron_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=False,
transformer_config=tfconfig,
)
if use_cpu_initialization:
# materialize meta-device parameters as empty CPU tensors so the `copy_` function can be used
model[0].module = model[0].module.to_empty(device="cpu")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from transformers import AutoModelForCausalLM, AutoModelForImageTextToText
# init hf model
if "Qwen2_5_VLForConditionalGeneration" in hf_config.architectures:
hf_model = AutoModelForImageTextToText.from_pretrained(
hf_model_path, torch_dtype=torch.bfloat16, trust_remote_code=trust_remote_code
)
else:
hf_model = AutoModelForCausalLM.from_pretrained(
hf_model_path, torch_dtype=torch.bfloat16, trust_remote_code=trust_remote_code
)
hf_state_dict = hf_model.state_dict()
pp_rank = mpu.get_pipeline_model_parallel_rank()
# distributed convert
if world_size > 1 and support_distributed_convert(hf_config):
pipeline_cumsum = np.cumsum(pipeline_shards)
layer_start = 0 if pp_rank == 0 else pipeline_cumsum[pp_rank - 1]
layer_end = pipeline_cumsum[pp_rank]
if "DeepseekV3ForCausalLM" in hf_config.architectures:
numel_partial: int = convert_checkpoint_from_transformers_to_megatron_dpskv3(
hf_model, model[0].module, hf_config, tfconfig=tfconfig, layer_start_end=(layer_start, layer_end)
)
elif "Qwen3MoeForCausalLM" in hf_config.architectures or "Qwen2MoeForCausalLM" in hf_config.architectures:
numel_partial: int = convert_checkpoint_from_transformers_to_megatron(
hf_model, model[0].module, hf_config, layer_start_end=(layer_start, layer_end)
)
else:
raise NotImplementedError(f"Distributed conversion is not supported for {hf_config.architectures} yet.")
numel_tensor = torch.tensor([numel_partial]).to(get_device_name())
dist.all_reduce(numel_tensor, op=dist.ReduceOp.SUM)
numel = int(numel_tensor.cpu().item())
print(f"total numel={numel} vs {hf_model.num_parameters()=}")
if numel != hf_model.num_parameters():
warnings.warn(f"numel mismatch: {numel=} != {hf_model.num_parameters()=}", stacklevel=1)
# load hf state dict to megatron model
elif "Qwen2MoeForCausalLM" in hf_config.architectures:
convert_checkpoint_from_transformers_to_megatron(hf_model, model[0].module, hf_config)
elif "Qwen2_5_VLForConditionalGeneration" in hf_config.architectures:
convert_checkpoint_from_transformers_to_megatron_qwen2_5_vl(hf_model, model[0].module, hf_config)
elif "DeepseekV3ForCausalLM" in hf_config.architectures:
convert_checkpoint_from_transformers_to_megatron_dpskv3(hf_model, model[0].module, hf_config, tfconfig=tfconfig)
elif "Qwen3MoeForCausalLM" in hf_config.architectures:
convert_checkpoint_from_transformers_to_megatron(hf_model, model[0].module, hf_config)
else:
assert not use_cpu_initialization, "use_cpu_initialization is only supported for MoE models"
from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel
load_state_dict_to_megatron_gptmodel(
state_dict=hf_state_dict,
wrapped_models=model,
config=hf_config,
params_dtype=torch.bfloat16,
is_value_model=False,
)
megatron_state_dict = model[0].module.sharded_state_dict()
del hf_state_dict, hf_model
# save megatron model
if len(os.listdir(output_path)) == 0:
dist_checkpointing.save(megatron_state_dict, output_path, sharded_strategy=None, async_sharded_save=False)
if test:
test_conversion(megatron_model_provider, tfconfig, output_path, model)
if __name__ == "__main__":
args = _init_args()
convert_hf_to_mcore(
args.hf_model_path,
args.output_path,
args.pp_size,
args.ep_size,
args.use_cpu_initialization,
args.test,
args.trust_remote_code,
)
|
scripts__converter_hf_to_mcore.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diagnose script for checking OS/hardware/python/pip/verl/network.
The output of this script can be a very good hint to issue/problem.
"""
import os
import platform
import socket
import subprocess
import sys
import time
import psutil
try:
from urllib.parse import urlparse
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from urlparse import urlparse
import argparse
import importlib.metadata
import torch
URLS = {
"PYPI": "https://pypi.python.org/pypi/pip",
}
REGIONAL_URLS = {
"cn": {
"PYPI(douban)": "https://pypi.douban.com/",
"Conda(tsinghua)": "https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/",
}
}
def test_connection(name, url, timeout=10):
"""Simple connection test"""
urlinfo = urlparse(url)
start = time.time()
try:
socket.gethostbyname(urlinfo.netloc)
except Exception as e:
print("Error resolving DNS for {}: {}, {}".format(name, url, e))
return
dns_elapsed = time.time() - start
start = time.time()
try:
_ = urlopen(url, timeout=timeout)
except Exception as e:
print("Error open {}: {}, {}, DNS finished in {} sec.".format(name, url, e, dns_elapsed))
return
load_elapsed = time.time() - start
print("Timing for {}: {}, DNS: {:.4f} sec, LOAD: {:.4f} sec.".format(name, url, dns_elapsed, load_elapsed))
def check_python():
print("----------Python Info----------")
print("Version :", platform.python_version())
print("Compiler :", platform.python_compiler())
print("Build :", platform.python_build())
print("Arch :", platform.architecture())
def check_pip():
print("------------Pip Info-----------")
try:
import pip
print("Version :", pip.__version__)
print("Directory :", os.path.dirname(pip.__file__))
except ImportError:
print("No corresponding pip install for current python.")
def _get_current_git_commit():
try:
result = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=True)
return result.stdout.strip()
except subprocess.CalledProcessError as e:
print(f"Error running git command: {e.stderr.strip()}")
return None
except FileNotFoundError:
print("Did not find command: git")
return None
def check_verl():
print("----------verl Info-----------")
try:
sys.path.insert(0, os.getcwd())
import verl
print("Version :", verl.__version__)
verl_dir = os.path.dirname(verl.__file__)
print("Directory :", verl_dir)
try:
commit_hash = _get_current_git_commit()
print("Commit Hash :", commit_hash)
except AttributeError:
print("Commit hash not found. ")
except ImportError as e:
print(f"No verl installed: {e}")
except Exception as e:
import traceback
if not isinstance(e, IOError):
print("An error occurred trying to import verl.")
print("This is very likely due to missing or incompatible library files.")
print(traceback.format_exc())
def check_os():
print("----------Platform Info----------")
print("Platform :", platform.platform())
print("system :", platform.system())
print("node :", platform.node())
print("release :", platform.release())
print("version :", platform.version())
def check_hardware():
print("----------Hardware Info----------")
print("machine :", platform.machine())
print("processor :", platform.processor())
if sys.platform.startswith("darwin"):
pipe = subprocess.Popen(("sysctl", "-a"), stdout=subprocess.PIPE)
output = pipe.communicate()[0]
for line in output.split(b"\n"):
if b"brand_string" in line or b"features" in line:
print(line.strip())
elif sys.platform.startswith("linux"):
subprocess.call(["lscpu"])
elif sys.platform.startswith("win32"):
subprocess.call(["wmic", "cpu", "get", "name"])
def check_network(args):
print("----------Network Test----------")
if args.timeout > 0:
print("Setting timeout: {}".format(args.timeout))
socket.setdefaulttimeout(10)
for region in args.region.strip().split(","):
r = region.strip().lower()
if not r:
continue
if r in REGIONAL_URLS:
URLS.update(REGIONAL_URLS[r])
else:
import warnings
warnings.warn("Region {} do not need specific test, please refer to global sites.".format(r), stacklevel=2)
for name, url in URLS.items():
test_connection(name, url, args.timeout)
def check_environment():
print("----------Environment----------")
for k, v in os.environ.items():
if k.startswith("VERL_") or k.startswith("OMP_") or k.startswith("KMP_") or k == "CC" or k == "CXX":
print('{}="{}"'.format(k, v))
def check_pip_package_versions():
packages = ["vllm", "sglang", "ray", "torch"]
for package in packages:
try:
version = importlib.metadata.version(package)
print(f"{package}\t : {version}")
except importlib.metadata.PackageNotFoundError:
print(f"{package}\t : not found.")
def check_cuda_versions():
if torch.cuda.is_available():
try:
cuda_runtime_version = torch.version.cuda
print(f"CUDA Runtime : {cuda_runtime_version}")
import subprocess
nvcc_output = subprocess.check_output(["nvcc", "--version"]).decode("utf-8")
cuda_compiler_version = next((line for line in nvcc_output.splitlines() if "release" in line), None)
if cuda_compiler_version:
print(f"CUDA Compiler : {cuda_compiler_version.strip()}")
else:
print("Could not determine CUDA compiler version.")
except FileNotFoundError as e:
print(f"CUDA compiler : Not found: {e}")
except Exception as e:
print(f"An error occurred while checking CUDA versions: {e}")
else:
print("CUDA is not available.")
def _get_cpu_memory():
"""
Get the total CPU memory capacity in GB.
"""
memory = psutil.virtual_memory()
return memory.total / (1024**3)
def _get_gpu_info():
"""
Get GPU type, GPU memory, and GPU count using nvidia-smi command.
"""
try:
result = subprocess.run(
["nvidia-smi", "--query-gpu=gpu_name,memory.total", "--format=csv,noheader,nounits"],
capture_output=True,
text=True,
check=True,
)
gpu_lines = result.stdout.strip().split("\n")
gpu_count = len(gpu_lines)
gpu_info = []
for line in gpu_lines:
gpu_name, gpu_memory = line.split(", ")
gpu_info.append(
{
"type": gpu_name,
"memory": float(gpu_memory) / 1024, # Convert to GB
}
)
return gpu_count, gpu_info
except (subprocess.CalledProcessError, FileNotFoundError):
print("Failed to execute nvidia-smi command.")
return 0, []
def _get_system_info():
"""
Get CPU memory capacity, GPU type, GPU memory, and GPU count.
"""
cpu_memory = _get_cpu_memory()
gpu_count, gpu_info = _get_gpu_info()
return {"cpu_memory": cpu_memory, "gpu_count": gpu_count, "gpu_info": gpu_info}
def check_system_info():
print("----------System Info----------")
system_info = _get_system_info()
print(f"CPU Memory\t: {system_info['cpu_memory']:.2f} GB")
print(f"GPU Count\t: {system_info['gpu_count']}")
for i, gpu in enumerate(system_info["gpu_info"]):
print(f"GPU {i + 1}\tType : {gpu['type']}")
print(f"GPU {i + 1}\tMemory : {gpu['memory']:.2f} GB")
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Diagnose script for checking the current system.",
)
choices = ["python", "pip", "verl", "system", "os", "environment"]
for choice in choices:
parser.add_argument("--" + choice, default=1, type=int, help="Diagnose {}.".format(choice))
parser.add_argument("--network", default=0, type=int, help="Diagnose network.")
parser.add_argument("--hardware", default=0, type=int, help="Diagnose hardware.")
parser.add_argument(
"--region",
default="",
type=str,
help="Additional sites in which region(s) to test. \
Specify 'cn' for example to test mirror sites in China.",
)
parser.add_argument("--timeout", default=10, type=int, help="Connection test timeout threshold, 0 to disable.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if args.python:
check_python()
if args.pip:
check_pip()
check_pip_package_versions()
if args.verl:
check_verl()
if args.os:
check_os()
if args.hardware:
check_hardware()
if args.network:
check_network(args)
if args.environment:
check_environment()
check_cuda_versions()
if args.system:
check_system_info()
|
scripts__diagnose.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script overrides a model with a custom config and random weights, mainly for creating
small models for debugging purposes.
Usage:
python scripts/init_random_model.py \
--hf_model_path <path_to_hf_model> \
--new_config_path <path_to_new_config.json> \
--output_path <path_to_output_model>
"""
import argparse
import json
import os
import warnings
from typing import Any
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PretrainedConfig
def _init_args():
parser = argparse.ArgumentParser()
parser.add_argument("--hf_model_path", type=str, required=True, help="The path for the huggingface model")
parser.add_argument("--new_config_path", type=str, required=True, help="The path for the new config file")
parser.add_argument("--output_path", type=str, required=True, help="The path for the output random model")
parser.add_argument(
"--trust_remote_code",
action="store_true",
help="Whether to trust remote code when loading HF model. Disabled by default for security.",
)
args = parser.parse_args()
return args
def check_output_path(output_path: str):
if os.path.exists(output_path):
warnings.warn(f"Output path '{output_path}' already exists. Will do nothing.", stacklevel=2)
exit()
else:
os.makedirs(output_path, exist_ok=True)
print(f"Output path '{output_path}' created.")
def check_configs(original_config: dict[str, Any], new_config: dict[str, Any]) -> None:
"""
Check if the original config and new config are compatible.
This is a placeholder function; actual implementation may vary based on requirements.
"""
# Example check: ensure 'model_type' is the same
if new_config.get("model_type", None) is not None and original_config.get("model_type") != new_config.get(
"model_type"
):
raise RuntimeError("Model types do not match.")
for key in new_config:
if key not in original_config:
warnings.warn(
f"Key '{key}' in new config does not exist in original config, may not take effect.", stacklevel=2
)
def init_random_model(hf_model_path, new_config_path, output_path, trust_remote_code: bool = False):
config = AutoConfig.from_pretrained(hf_model_path, trust_remote_code=trust_remote_code)
tokenizer = AutoTokenizer.from_pretrained(hf_model_path, trust_remote_code=trust_remote_code)
config_dict = PretrainedConfig.get_config_dict(hf_model_path)[0]
print(config_dict)
with open(new_config_path) as f:
new_config_dict = json.load(f)
check_configs(config_dict, new_config_dict)
config_dict.update(new_config_dict)
new_config = config.from_dict(config_dict)
print(f"new_config: {new_config}")
if trust_remote_code:
model = AutoModelForCausalLM.from_pretrained(
hf_model_path, config=new_config, trust_remote_code=trust_remote_code
)
else:
model = AutoModelForCausalLM.from_config(new_config)
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
new_config.save_pretrained(output_path)
print(f"Random model initialized and saved to {output_path}")
if __name__ == "__main__":
args = _init_args()
check_output_path(args.output_path)
init_random_model(
hf_model_path=args.hf_model_path,
new_config_path=args.new_config_path,
output_path=args.output_path,
trust_remote_code=args.trust_remote_code,
)
|
scripts__init_random_model.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to merge verl checkpoints from the FSDP and Megatron backends into Hugging Face format, and to test the merged model.
To merge FSDP checkpoints:
```sh
python scripts/legacy_model_merger.py merge \
--backend fsdp \
--local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
To merge Megatron checkpoints:
```sh
python scripts/legacy_model_merger.py merge \
--backend megatron \
--tie-word-embedding \
--local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
For more details, please refer to documentation:
https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model
"""
import argparse
import os
import re
import warnings
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
import numpy as np
import torch
from accelerate import init_empty_weights
from safetensors.torch import load_file
from torch.distributed._tensor import Placement, Shard
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
GenerationConfig,
PretrainedConfig,
)
try:
# for torch 2.5+
from torch.distributed.tensor import DTensor
except ImportError:
from torch.distributed._tensor import DTensor
from tqdm import tqdm
from verl.utils import hf_processor, hf_tokenizer
@dataclass
class ModelMergerConfig:
operation: str # 'merge' or 'test'
backend: str
local_dir: str
hf_model_config_path: str
target_dir: Optional[str] = "tmp"
hf_upload_path: Optional[str] = None
private: bool = False
test_hf_dir: Optional[str] = None
tie_word_embedding: bool = False
is_value_model: bool = False
hf_model_path: Optional[str] = None
hf_upload: bool = field(init=False)
def __post_init__(self):
self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path)
if self.operation == "test":
self.target_dir = None
self.hf_upload_path = None
self.private = False
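# A minimal construction sketch (paths are placeholders, not values used by this script):
#   config = ModelMergerConfig(operation="merge", backend="fsdp",
#                              local_dir="checkpoints/.../global_step_1/actor",
#                              hf_model_config_path="checkpoints/.../global_step_1/actor",
#                              target_dir="/path/to/merged_hf_model")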
class BaseModelMerger(ABC):
def __init__(self, config: ModelMergerConfig):
self.config = config
self.hf_model_config_path = config.hf_model_config_path
if config.hf_model_path:
print(
"Warning: --hf_model_path is deprecated and will be removed in a future version. Currently verl will save huggingface model configuration files into checkpoint directories. Therefore, there is no need to provide --hf_model_path. "
)
self.hf_model_config_path = config.hf_model_path
# Auto-detect huggingface subdirectory if it exists
huggingface_subdir = os.path.join(self.hf_model_config_path, "huggingface")
if os.path.isdir(huggingface_subdir):
self.hf_model_config_path = huggingface_subdir
self.model_config = AutoConfig.from_pretrained(self.hf_model_config_path)
def get_transformers_auto_model_class(self):
# Handle case where architectures might be None or empty
if self.model_config.architectures is None or len(self.model_config.architectures) == 0:
# Try to infer from model_type if architectures is missing
model_type = getattr(self.model_config, 'model_type', '').lower()
if 'vision' in model_type or 'vl' in model_type:
return AutoModelForVision2Seq
elif 'causal' in model_type or 'gpt' in model_type or 'llama' in model_type or 'qwen' in model_type:
return AutoModelForCausalLM
else:
raise NotImplementedError(
f"Cannot determine model class: architectures is None and model_type '{model_type}' is not recognized"
)
architecture = self.model_config.architectures[0]
if "ForTokenClassification" in architecture:
return AutoModelForTokenClassification
elif "ForCausalLM" in architecture:
return AutoModelForCausalLM
elif "ForConditionalGeneration" in architecture:
return AutoModelForVision2Seq
raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}")
def patch_model_generation_config(self, model):
"""
The generation_config created from the model config may differ from the one shipped with the
pretrained model, which can lead to errors when generating: https://github.com/volcengine/verl/issues/1246
This function patches the model with the generation_config loaded from the pretrained checkpoint.
"""
if model.can_generate():
try:
model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path)
except OSError:
print(
f"Warning: Generation config file not found in {self.hf_model_config_path}, using a generation config created from the model config."
)
return model
def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]):
"""
Save lora adapter to safetensors.
Returns:
lora_path: str, the path to the lora adapter. None if no lora adapter found.
Note:
This function changes the 'state_dict' in place.
"""
lora_params_names = [name for name in state_dict.keys() if "lora_" in name]
if len(lora_params_names) == 0:
return None
import json
from typing import OrderedDict
import peft
from safetensors.torch import save_file
lora_params = OrderedDict()
target_modules = set()
lora_key = None
for name in lora_params_names:
lora_key = name.replace(".default.weight", ".weight")
target_modules.add(lora_key.split(".")[-3])
lora_params[lora_key] = state_dict.pop(name)
lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1])
peft_dict = {
"r": lora_rank,
"lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually.
"target_modules": list(target_modules),
}
peft_config = peft.LoraConfig(**peft_dict).to_dict()
peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None
peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None
peft_config["target_modules"] = list(peft_config["target_modules"])
lora_path = os.path.join(self.config.target_dir, "lora_adapter")
os.makedirs(lora_path, exist_ok=True)
with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f:
json.dump(peft_config, f, ensure_ascii=False, indent=4)
save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors"))
for name in list(state_dict.keys()):
key = (
name.replace("base_model.model.", "")
.replace(".base_layer.weight", ".weight")
.replace(".base_layer.bias", ".bias")
)
state_dict[key] = state_dict.pop(name)
return lora_path
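# Usage sketch (an assumption, not exercised by this merger): the saved adapter can be reloaded
# with peft once lora_alpha has been set manually in adapter_config.json, e.g.
#   from peft import PeftModel
#   model_with_lora = PeftModel.from_pretrained(base_model, lora_path)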
def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
with init_empty_weights():
model = auto_model_class.from_config(self.model_config, torch_dtype=torch.bfloat16)
model.to_empty(device="cpu")
model = self.patch_model_generation_config(model)
lora_path = self.save_lora_adapter(state_dict)
if lora_path:
print(f"Saving lora adapter to {lora_path}")
print(f"Saving model to {self.config.target_dir}")
model.save_pretrained(self.config.target_dir, state_dict=state_dict)
del state_dict
del model
processor = hf_processor(self.hf_model_config_path)
try:
tokenizer = hf_tokenizer(self.hf_model_config_path)
except Exception as e:
warnings.warn(f"Failed to create tokenizer: {e}. This may affect tokenizer saving", stacklevel=1)
tokenizer = None
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def upload_to_huggingface(self):
from huggingface_hub import HfApi
api = HfApi()
api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True)
api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model")
@abstractmethod
def merge_and_save(self):
raise NotImplementedError("Subclasses should implement this method")
class FSDPModelMerger(BaseModelMerger):
def _get_world_size(self) -> int:
"""Extracts the FSDP world_size from checkpoint filenames (e.g., 'model_world_size_8_rank_0.pt')."""
for filename in os.listdir(self.config.local_dir):
match = re.match(r"model_world_size_(\d+)_rank_0\.pt", filename)
if match:
return int(match.group(1))
raise FileNotFoundError(
f"Could not determine world size. No file matching 'model_world_size_(\\d+)_rank_0.pt' found in {self.config.local_dir}"
)
def _load_rank_zero_state_dict(self, world_size: int) -> dict:
return torch.load(
Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt",
map_location="cpu",
weights_only=False,
)
def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]:
"""
Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict.
If no DTensor is found, infers a simple FSDP mesh based on world_size.
"""
pivot_key = sorted(list(state_dict.keys()))[0]
weight = state_dict[pivot_key]
if isinstance(weight, DTensor):
# get sharding info
device_mesh = weight.device_mesh
mesh = device_mesh.mesh
mesh_dim_names = device_mesh.mesh_dim_names
else:
# for non-DTensor
mesh = np.array([world_size], dtype=np.int64)
mesh_dim_names = ("fsdp",)
return mesh, mesh_dim_names
def _calculate_shard_configuration(
self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...]
) -> tuple[int, tuple[int, ...]]:
"""Calculates the total number of shards and the shape of the device mesh."""
assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}"
if "tp" in mesh_dim_names:
# TODO: "tp" is not supported yet due to the above assert
total_shards = mesh.shape[-1] * mesh.shape[-2]
mesh_shape = (mesh.shape[-2], mesh.shape[-1])
else:
total_shards = mesh.shape[-1]
mesh_shape = (mesh.shape[-1],)
return total_shards, mesh_shape
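# For example, a ("ddp", "fsdp") mesh of shape (2, 4) yields total_shards=4 and mesh_shape=(4,),
# while a pure ("fsdp",) mesh of shape (8,) yields total_shards=8 and mesh_shape=(8,).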
def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor:
"""Merges a list of tensors based on their DTensor placement"""
if placement.is_replicate():
return tensors[0]
elif placement.is_partial():
raise NotImplementedError("Partial placement is not supported yet")
elif placement.is_shard():
return torch.cat(tensors, dim=placement.dim).contiguous()
raise NotImplementedError(f"Unsupported placement: {placement}")
def _load_and_merge_state_dicts(
self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...]
) -> dict[str, torch.Tensor]:
model_state_dict_lst = [None] * total_shards
def process_one_shard(rank: int, model_state_dict_lst: list):
model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt"
state_dict = torch.load(model_path, map_location="cpu", weights_only=False)
model_state_dict_lst[rank] = state_dict
return state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)]
for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards):
future.result()
# Merge state dicts from all shards
state_dict = {}
param_placements: dict[str, list] = {}
for key in set(model_state_dict_lst[0].keys()):
state_dict[key] = []
for model_state_shard in model_state_dict_lst:
# add tensor shard in order of rank to state_dict[key]
tensor = model_state_shard.pop(key)
if isinstance(tensor, DTensor):
state_dict[key].append(tensor._local_tensor.bfloat16())
placements = tuple(tensor.placements)
# replicated placement at dp dimension can be discarded
if mesh_dim_names[0] in ("dp", "ddp"):
placements = placements[1:]
if key not in param_placements:
param_placements[key] = placements
else:
assert param_placements[key] == placements
else:
state_dict[key].append(tensor.bfloat16())
del model_state_dict_lst
# Merge tensors
for key in sorted(state_dict):
if not isinstance(state_dict[key], list):
print(f"No need to merge key {key}")
continue
if key in param_placements:
# merge shards
placements: tuple[Shard] = param_placements[key]
if len(mesh_shape) == 1:
# 1-D list, FSDP without TP
assert len(placements) == 1
shards = state_dict[key]
state_dict[key] = self._merge_by_placement(shards, placements[0])
else:
# 2-D list, FSDP + TP
raise NotImplementedError("FSDP + TP is not supported yet")
else:
state_dict[key] = torch.cat(state_dict[key], dim=0)
return state_dict
def merge_and_save(self):
world_size = self._get_world_size()
rank_zero_state_dict = self._load_rank_zero_state_dict(world_size)
mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size)
print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}")
total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names)
print(f"Processing model shards with {total_shards} {mesh_shape} in total")
merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names)
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._test_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _test_state_dict(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16)
hf_state_dict = hf_model.state_dict()
del hf_model
hf_model_keys = set(hf_state_dict.keys())
collected_keys = set(state_dict.keys())
missing_keys = hf_model_keys - collected_keys
assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}"
extra_keys = collected_keys - hf_model_keys
assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}"
for key in hf_model_keys:
hf_shape = hf_state_dict[key].shape
collected_shape = state_dict[key].shape
assert hf_shape == collected_shape, (
f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}"
)
hf_dtype = hf_state_dict[key].dtype
collected_dtype = state_dict[key].dtype
assert hf_dtype == collected_dtype, (
f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}"
)
torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6)
print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.")
class MegatronModelMerger(BaseModelMerger):
def __init__(self, config: ModelMergerConfig):
from verl.utils.megatron_utils import get_hf_config_and_tokenizer_checkpoint_path
config.hf_model_config_path = get_hf_config_and_tokenizer_checkpoint_path(config.local_dir)
super().__init__(config)
self.params_mapping = {
# megatron core gpt model name, huggingface model name
# NOTICE: this is a bit tricky; when two keys share a prefix, the longer (more specific) key must be listed and matched first.
"embedding.word_embeddings": "model.embed_tokens",
# attn
"self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight",
"self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias",
"self_attention.linear_qkv": "self_attn.qkv_proj",
"self_attention.q_layernorm": "self_attn.q_norm",
"self_attention.k_layernorm": "self_attn.k_norm",
"self_attention.linear_proj": "self_attn.o_proj",
# mla
"self_attention.linear_q_proj": "self_attn.q_proj",
"self_attention.linear_q_down_proj": "self_attn.q_a_proj",
"self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight",
"self_attention.linear_q_up_proj": "self_attn.q_b_proj",
"self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa",
"self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight",
"self_attention.linear_kv_up_proj": "self_attn.kv_b_proj",
# mlp
"pre_mlp_layernorm": "post_attention_layernorm",
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
"mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias",
"mlp.linear_fc1": "mlp.gate_up_proj",
"mlp.linear_fc2": "mlp.down_proj",
# moe
"mlp.router.expert_bias": "mlp.gate.e_score_correction_bias",
"mlp.router": "mlp.gate",
"mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj",
"mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj",
"linear_fc1": "gate_up_proj",
"linear_fc2": "down_proj",
# output
"final_layernorm": "norm",
"output_layer": "lm_head",
}
def _get_tp_pp_rank_from_sharded_dir(self, sharded_dir: str) -> tuple[int, int]:
tp_rank = pp_rank = None
rank_list = sharded_dir.split("_")[2:]
if re.match(r"mp_rank_(\d\d)_(\d\d\d)", sharded_dir):
tp_rank = int(rank_list[0])
pp_rank = int(rank_list[1])
elif re.match(r"mp_rank_(\d\d)", sharded_dir):
tp_rank = int(rank_list[0])
pp_rank = 0
assert tp_rank is not None and pp_rank is not None, f"Invalid sharded dir {sharded_dir}"
return tp_rank, pp_rank
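# For example, "mp_rank_00_001" -> (tp_rank=0, pp_rank=1) and "mp_rank_01" -> (tp_rank=1, pp_rank=0).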
def _check_megatron_checkpoint_path(self, model_path: str) -> tuple[list[str], int, int]:
"""
Validates the Megatron checkpoint structure (presence of 'model.pt' in sharded directories).
Determines TP and PP sizes from directory names.
"""
tp_size = 0
pp_size = 0
sharded_dirs = sorted(os.listdir(model_path))
for sharded_dir in sharded_dirs:
assert "model.pt" in os.listdir(Path(model_path) / sharded_dir), f"model.pt not found in {sharded_dir}"
tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir)
tp_size = max(tp_size, tp_rank + 1)
pp_size = max(pp_size, pp_rank + 1)
return sharded_dirs, tp_size, pp_size
def _merge_across_tp(
self,
key: str,
tp_data: list[torch.Tensor],
config: PretrainedConfig,
tp_size: int,
is_value_model: bool = False,
) -> Union[torch.Tensor, list[torch.Tensor]]:
if "linear_fc1.weight" in key:
# if the tensor is gate and proj
gate_lst = []
up_lst = []
for infer_param in tp_data:
gate, up = infer_param.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
return [gate, up]
elif "self_attention.linear_qkv." in key and "layer_norm" not in key:
# if the tensor is qkv, for each param on tp, split into q, k, v
# concat q, k, v separately.
q_lst = []
k_lst = []
v_lst = []
assert config.num_attention_heads % config.num_key_value_heads == 0
num_q_per_kv = config.num_attention_heads // config.num_key_value_heads
assert tp_data[0].shape[0] % (num_q_per_kv + 2) == 0
kv_size_per_tp = tp_data[0].shape[0] // (num_q_per_kv + 2)
split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp]
for infer_param in tp_data:
num_query_groups_per_partition = config.num_key_value_heads // tp_size
for chunk in infer_param.chunk(num_query_groups_per_partition):
split_size = [
kv_size_per_tp * num_q_per_kv // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
]
q, k, v = chunk.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
q = torch.cat(q_lst, dim=0)
k = torch.cat(k_lst, dim=0)
v = torch.cat(v_lst, dim=0)
return [q, k, v]
elif "layer_norm" in key or "layernorm" in key or "router" in key or ("output_layer" in key and is_value_model):
return tp_data[0]
else:
dim = 0
if "linear_fc2.weight" in key or "self_attention.linear_proj" in key:
dim = 1
return torch.cat(tp_data, dim=dim)
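# Shape sketch with toy numbers (illustrative only): with tp_size=2, each linear_fc1.weight
# shard holds its gate and up halves stacked on dim 0; chunk(2) separates them per rank and
# the per-rank halves are concatenated across ranks into full gate and up matrices.
# Row-parallel weights (linear_fc2 / linear_proj) are instead concatenated along dim 1.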
def _load_state_dicts(
self, model_ckpt_path: str, sharded_dirs: list[str], tp_size: int, pp_size: int
) -> list[list[dict]]:
model_state_dict_lst = [[None for _ in range(tp_size)] for _ in range(pp_size)]
def _process_one_megatron_shard(sharded_dir: str):
model_file_path = Path(model_ckpt_path) / sharded_dir / "model.pt"
state_dict = torch.load(model_file_path, map_location="cpu", weights_only=False)
tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir)
model_state_dict_lst[pp_rank][tp_rank] = state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(_process_one_megatron_shard, sharded_dir) for sharded_dir in sharded_dirs]
for future in tqdm(futures, desc=f"Loading {len(sharded_dirs)} Megatron shards", total=len(sharded_dirs)):
future.result()
return model_state_dict_lst
def _check_megatron_state_key(self, key: str) -> bool:
"""
Checks if the key is a valid Megatron state key.
The model merger currently only supports keys that start with "decoder", "embedding", or "output_layer".
Keys starting with "model." are not allowed.
"""
if key.startswith("model."):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder/embedding/output_layer' in TransformerLayer."
)
skip_checking_keys = ["embedding.word_embeddings", "output_layer"]
for skip_key in skip_checking_keys:
if skip_key in key:
print(f"skip checking key {key}")
return
# Exclude extra state keys
if not key.startswith("decoder"):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer."
)
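# For example, "decoder.layers.0.mlp.linear_fc1.weight" passes the check above, while a key
# such as "model.layers.0.mlp.gate_up_proj.weight" raises ValueError.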
def _merge_state_dicts(
self, model_state_dict_lst: list[list[dict]], tp_size: int, pp_size: int
) -> dict[str, torch.Tensor]:
state_dict = {}
vpp_size = len(model_state_dict_lst[0][0])
layers_cum = 0
for vpp_rank in range(vpp_size):
for pp_rank in range(pp_size):
layers_handled = 0
keys = model_state_dict_lst[pp_rank][0][vpp_rank].keys()
for key in keys:
if "extra_state" in key:
continue
if self.config.tie_word_embedding and ("output_layer" in key):
print("skip lm_head and reward_head loading because of tie_word_embeddings")
continue
self._check_megatron_state_key(key)
hf_name = self._replace_name(key, self.params_mapping)
assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface."
if "model.layers." in hf_name:
local_layer_no = int(hf_name.split(".")[2])
layers_handled = max(local_layer_no, layers_handled)
global_layer_no = local_layer_no + layers_cum
new_key_list = hf_name.split(".")
new_key_list[2] = str(global_layer_no)
hf_name = ".".join(new_key_list)
else:
warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2)
tp_data = [model_state_dict_lst[pp_rank][tp_rank][vpp_rank][key] for tp_rank in range(tp_size)]
merged = self._merge_across_tp(key, tp_data, self.model_config, tp_size, self.config.is_value_model)
if not isinstance(merged, list):
state_dict[hf_name] = merged
elif len(merged) == 3:
# split qkv
for n, d in zip(["q", "k", "v"], merged):
state_dict[hf_name.replace("qkv", n)] = d
elif len(merged) == 2:
# split gate up
state_dict[hf_name.replace("gate_up", "gate")] = merged[0]
state_dict[hf_name.replace("gate_up", "up")] = merged[1]
print(
f"converted {key} to {hf_name} with shape {merged.shape if isinstance(merged, torch.Tensor) else [t.shape for t in merged]}"
)
layers_cum += layers_handled + 1  # layers_handled is the max zero-based local layer index, hence the +1
return state_dict
def merge_and_save(self):
from verl.utils.megatron_utils import get_model_checkpoint_path
model_ckpt_path = get_model_checkpoint_path(self.config.local_dir)
sharded_dirs, tp_size, pp_size = self._check_megatron_checkpoint_path(model_ckpt_path)
print(f"sharded_dirs: {sharded_dirs}, tp_size: {tp_size}, pp_size: {pp_size}, mp_size: {len(sharded_dirs)}")
model_state_dict_lst = self._load_state_dicts(model_ckpt_path, sharded_dirs, tp_size, pp_size)
merged_state_dict = self._merge_state_dicts(model_state_dict_lst, tp_size, pp_size)
del model_state_dict_lst
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._test_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _test_state_dict(self, state_dict: dict[str, torch.Tensor]):
"""
Compares the merged Megatron state_dict against a reference safetensors model.
Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name.
"""
ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors")
for name, loaded_weight in state_dict.items():
# name = self._replace_name(original_name, self.params_mapping)
if not name or (name.endswith(".bias") and name not in ref_state_dict):
continue
if "rotary_emb.inv_freq" in name:
continue
if self.config.tie_word_embedding and "lm_head.weight" in name:
continue
if name not in ref_state_dict:
raise RuntimeError(f"key: {name} does not exist in the reference state_dict")
param = ref_state_dict[name]
assert loaded_weight.dtype == param.dtype
torch.testing.assert_close(loaded_weight, param, atol=1e-2, rtol=5e-2)
def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str:
for m_name, v_name in name_mapping.items():
if m_name not in megatron_name:
continue
megatron_name = megatron_name.replace("decoder", "model")
param_name = megatron_name.replace(m_name, v_name)
return param_name
return None # Return None if no mapping found
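# A minimal, self-contained sketch (not used by the merger) of the first-match replacement
# implemented in _replace_name above, showing why longer keys must precede their prefixes in
# params_mapping. The mapping and key below are toy values for illustration only.
def _demo_first_match_replace():
    toy_mapping = {
        "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
        "mlp.linear_fc1": "mlp.gate_up_proj",
    }
    key = "decoder.layers.0.mlp.linear_fc1.layer_norm_weight"
    for m_name, v_name in toy_mapping.items():
        if m_name in key:
            # the longer entry matches first, so the layer-norm weight is not mis-mapped
            return key.replace("decoder", "model").replace(m_name, v_name)
    return None
# _demo_first_match_replace() == "model.layers.0.post_attention_layernorm.weight"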
def main():
parser = argparse.ArgumentParser(description="verl model merger")
subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.")
base_op_parser = argparse.ArgumentParser(add_help=False)
base_op_parser.add_argument(
"--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model"
)
base_op_parser.add_argument("--local_dir", type=str, required=True, help="Path to the saved model checkpoints")
base_op_parser.add_argument(
"--hf_model_path",
type=str,
default=None,
help="(Deprecated) Path to the original Hugging Face model for config.",
)
base_op_parser.add_argument(
"--tie-word-embedding",
action="store_true",
help="Whether to tie word embedding weights (currently only Megatron supported)",
)
base_op_parser.add_argument(
"--is-value-model",
action="store_true",
help="Whether the model is a value model (currently only Megatron supported)",
)
merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.")
merge_parser.add_argument(
"--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model"
)
merge_parser.add_argument(
"--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model"
)
merge_parser.add_argument(
"--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository"
)
test_parser = subparsers.add_parser(
"test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model"
)
test_parser.add_argument(
"--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing"
)
args = parser.parse_args()
common_config_args = {
"operation": args.operation,
"backend": args.backend,
"tie_word_embedding": args.tie_word_embedding,
"is_value_model": args.is_value_model,
"local_dir": args.local_dir,
"hf_model_path": args.hf_model_path,
"hf_model_config_path": args.local_dir,
}
if args.operation == "merge":
config = ModelMergerConfig(
**common_config_args,
target_dir=args.target_dir,
hf_upload_path=args.hf_upload_path,
private=args.private,
test_hf_dir=None,
)
os.makedirs(config.target_dir, exist_ok=True)
elif args.operation == "test":
config = ModelMergerConfig(
**common_config_args,
test_hf_dir=args.test_hf_dir,
# the following args are not used by test operation
target_dir=None,
hf_upload_path=None,
private=False,
)
else:
raise NotImplementedError(f"Unknown operation: {args.operation}")
if config.backend == "fsdp":
merger = FSDPModelMerger(config)
elif config.backend == "megatron":
merger = MegatronModelMerger(config)
else:
raise NotImplementedError(f"Unknown backend: {config.backend}")
merger.merge_and_save()
if __name__ == "__main__":
main()
|
scripts__legacy_model_merger.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pprint import pprint
import hydra
import ray
import torch
from omegaconf import OmegaConf
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.utils.megatron_utils import get_hf_model_checkpoint_path, load_megatron_model_to_gpu
from verl.workers.megatron_workers import ActorRolloutRefWorker
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class CustomSaveWorker(ActorRolloutRefWorker):
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_merged_weights(self, hf_ckpt_path):
import os
if self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module)
torch.distributed.barrier()
print(f"[Rank {os.environ.get('RANK', '?')}] Saving weights to {hf_ckpt_path}...")
if self.vanilla_bridge:
self.bridge.save_weights(
self.actor_module, hf_ckpt_path, distributed_filesystem=True, memory_efficient=True
)
else:
self.bridge.save_hf_weights(self.actor_module, hf_ckpt_path)
return True
@hydra.main(config_path="../verl/trainer/config", config_name="ppo_megatron_trainer", version_base=None)
def main(config):
assert config.actor_rollout_ref.model.lora.adapter_path is not None, "adapter_path must be specified"
if (
config.actor_rollout_ref.actor.optim.lr_decay_steps is None
or config.actor_rollout_ref.actor.optim.lr_decay_steps < 1
):
# set to bypass OptimizerParamScheduler checks
config.actor_rollout_ref.actor.optim.lr_decay_steps = 100000
run_merge(config)
def run_merge(config) -> None:
if not ray.is_initialized():
# this is for local ray cluster
default_runtime_env = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}}
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
print(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
ray.get(main_task.remote(config))
@ray.remote(num_cpus=1)
def main_task(config):
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
ray_cls_with_init = RayClassWithInitArgs(
cls=ray.remote(CustomSaveWorker), config=config.actor_rollout_ref, role="actor"
)
resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
worker = RayWorkerGroup(
resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
device_name=config.trainer.device,
)
worker.init_model()
adapter_path = config.actor_rollout_ref.model.lora.adapter_path
hf_ckpt_path = get_hf_model_checkpoint_path(os.path.dirname(adapter_path))
worker.save_merged_weights(hf_ckpt_path)
if __name__ == "__main__":
"""
Use the same config as your training script, in addition to **specifying the adapter_path**.
For example, your training script starts with:
`python3 -m verl.trainer.main_ppo --config-name=ppo_megatron_trainer ...`
Now replace it with
`python3 ./scripts/megatron_merge_lora.py --config-name=ppo_megatron_trainer ...`
"""
main()
|
scripts__megatron_merge_lora.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import hydra
except ImportError as e:
raise ImportError("Please install hydra-core via 'pip install hydra-core' and retry.") from e
@hydra.main(config_path="../verl/trainer/config", config_name="ppo_trainer", version_base=None)
def main(config):
"""Main entry point for PPO training with Hydra configuration management.
Args:
config: Hydra configuration containing training parameters.
"""
print(config)
from verl.utils.config import omega_conf_to_dataclass
profiler_config = omega_conf_to_dataclass(config.critic.profiler)
print(profiler_config)
if __name__ == "__main__":
main()
|
scripts__print_cfg.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import re
import traceback
from pathlib import Path
from typing import Annotated, Optional
import aiofiles
try:
import ujson as json
except ImportError:
import json
import typer
from rich.highlighter import ReprHighlighter
from rich.markdown import Markdown
from rich.table import Table
from rich.text import Text
from textual import on
from textual.app import App, ComposeResult
from textual.containers import Horizontal, Vertical, VerticalScroll
from textual.widgets import Input, ProgressBar, Select, SelectionList, Static
INDEX_KEY = "__IDX"
FILE_SUFFIX = ".jsonl"
def check_textual_version():
# check if textual version is equal to 0.52.1
import textual
from packaging.version import Version
if Version(textual.__version__) != Version("0.52.1"):
raise ImportError(f"Textual version {textual.__version__} is not supported, please pip install textual==0.52.1")
check_textual_version()
async def load_path(p: Path, data: dict, mask_strs: str, idx: int, pbar):
samples = []
async with aiofiles.open(p, encoding="utf-8") as f:
async for line in f:
d = json.loads(line)
for k in d:
if isinstance(d[k], str):
if mask_strs:
d[k] = re.sub(rf"{mask_strs}", "*", d[k])
else:
d[k] = json.dumps(d[k], ensure_ascii=False, indent=4)
d[INDEX_KEY] = len(samples)
samples.append(d)
data[idx] = {"samples": samples}
print(f"path {p} loaded")
pbar.advance(1)
async def load_dir(path: Path, data: dict[int, dict], pbar, mask_strs: str = ""):
paths = list(path.glob(f"*{FILE_SUFFIX}"))
paths = sorted(paths, key=lambda x: int(x.stem))
tasks = [load_path(p, data, mask_strs, i, pbar) for i, p in enumerate(paths)]
await asyncio.gather(*tasks)
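# Expected layout (one jsonl dump per rollout step, named by its integer step index), e.g.:
#   <rollout_data_dir>/1.jsonl, <rollout_data_dir>/2.jsonl, ...
# Files are ordered numerically by their stem before loading.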
class Highlighter(ReprHighlighter):
highlights = ReprHighlighter.highlights + [
r"(?P<tag_name>[][\<\>{}()\|()【】\[\]=`])",
r"\<\|(?P<tag_name>[\w\W]*?)\|\>",
]
def center_word_with_equals_exactly(word: str, total_length: int, char: str = "=") -> str:
if len(word) > total_length:
return word
padding = total_length - len(word)
left_pad = (padding) // 2
right_pad = (padding + 1) // 2
return char * left_pad + " " + word + " " + char * right_pad
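# e.g. center_word_with_equals_exactly("key", 10) -> "=== key ===="
# (the result is total_length + 2 characters because of the two surrounding spaces)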
def highlight_keyword(content: str, keyword: Optional[str]):
if not keyword:
return Text(content)
text = Text()
parts = content.split(keyword)
for i, part in enumerate(parts):
text.append(part, style=None)
if i < len(parts) - 1:
# text.append(keyword, style=Style(color="#d154d1", bgcolor="yellow", bold=True))
text.append(keyword, style="on #8f51b5")
return text
help_doc = """
⌨️ keybinds:
- `f/esc`: find/cancel
- `tab/←/→`: change focus
- `j/k`: page down/up
- `g/G`: scroll home/end
- `n/N`: next sample/step
- `p/P`: previous sample/step
- `s`: switch display mode
- plain text
- rich table
"""
class JsonLineViewer(App):
BINDINGS = [
("left", "focus_previous", "Focus Previous"),
("right", "focus_next", "Focus Next"),
("s", "switch_render", "switch render"),
# control
("n", "next_sample", "Next Sample"),
("N", "next_step", "Next Step"),
("p", "previous_sample", "Previous Sample"),
("P", "previous_step", "Previous Step"),
# search
("f", "toggle_search", "find"),
("enter", "next_search", "find next"),
("escape", "cancel_search", "cancel find"),
# scroll
("j", "page_down", "page down"),
("k", "page_up", "page up"),
("g", "page_home", "page home"),
("G", "page_end", "page end"),
]
CSS = """
Select:focus > SelectCurrent {
border: tall #8f51b5;
}
Select.-expanded > SelectCurrent {
border: tall #8f51b5;
}
#select-container {
width: 15%;
height: 100%;
align: center top;
}
#search-container {
height: 10%;
align: center top;
}
#search-box {
width: 50%;
}
#reqid-box {
width: 50%;
}
"""
def __init__(self, step_num: int, data: dict[int, dict], pbar):
super().__init__()
self.step_num = step_num
self.data = data
self.render_table = False
self.selected_step_index = 0
self.selected_sample_index = 0
self.pbar = pbar
self.matches = []
self.current_match_index = 0
self.highlighter = Highlighter()
first_samples = data[list(data.keys())[0]]["samples"]
# Prepare the initial field filter list (all keys from the first sample)
self.filter_fields = [(f, f, True) for f in first_samples[0].keys()]
# Internal set used for fast membership checks when we add new fields on the fly.
# We keep it here so that when new columns appear in later steps (e.g. `request_id`),
# they can be added to the UI automatically without restarting the viewer.
self._field_set: set[str] = set(first_samples[0].keys())
self.sample_num = len(first_samples)
def compose(self) -> ComposeResult:
with Horizontal(id="search-container"):
yield Input(placeholder="find something...", id="search-box")
yield Input(placeholder="request id...", id="reqid-box")
with Vertical(id="search-container2"):
yield self.pbar
yield Static("", id="search-status")
with Horizontal():
with Vertical(id="select-container"):
yield Static("\n")
yield Static(
renderable=Markdown(
help_doc,
),
markup=False,
)
yield Static("\n")
yield Select(
id="step-select",
value=0,
prompt="select step",
options=[("step: 1", 0)],
allow_blank=False,
)
yield Select(
id="sample-select",
value=0,
prompt="select sample",
options=[("sample: 1", 0)],
allow_blank=False,
)
yield Select(
id="sample-sort",
value=0,
prompt="sort",
options=[
("sort", 0),
("score asc", 1),
("score desc", 2),
],
allow_blank=False,
)
yield SelectionList[int](("Select ALL", 1, True), id="fields-select-all")
with VerticalScroll(id="scroll-view2"):
yield SelectionList[str](*self.filter_fields, id="fields-select")
with VerticalScroll(id="scroll-view"):
yield Static(id="content", markup=False)
async def on_mount(self) -> None:
self.step_select = self.query_one("#step-select", Select)
self.sample_select = self.query_one("#sample-select", Select)
self.sample_sort = self.query_one("#sample-sort", Select)
self.content_display = self.query_one("#content", Static)
self.search_box = self.query_one("#search-box", Input)
self.reqid_box = self.query_one("#reqid-box", Input)
self.scroll_view = self.query_one("#scroll-view", VerticalScroll)
self.search_status = self.query_one("#search-status", Static)
self.fields_select = self.query_one("#fields-select", SelectionList)
self.fields_select.border_title = "field filter"
if self.data:
self.step_select.set_options([(f"step: {i + 1}", i) for i in range(self.step_num)])
self.sample_select.set_options([(f"sample: {i + 1}", i) for i in range(self.sample_num)])
self.step_select.focus()
await self.update_content()
def update_result_options(self, offset: int = 0, sort_desc: Optional[bool] = None):
options = []
if isinstance(self.selected_step_index, int) and self.selected_step_index < len(self.data):
if self.sample_num is None or sort_desc is not None:
samples = self.data[self.selected_step_index].get("samples", [])
if not samples:
self.selected_sample_index = offset
return
if sort_desc is not None:
samples = sorted(
samples,
key=lambda x: x.get("score", x.get("score_1", 0)),
reverse=sort_desc,
)
options = [(f"sample: {r[INDEX_KEY] + 1}", r[INDEX_KEY]) for r in samples]
self.sample_select.set_options(options)
self.sample_num = len(samples)
if sort_desc is not None and options:
self.selected_sample_index = options[0][1]
else:
self.selected_sample_index = offset
async def update_content(self, search_keyword: Optional[str] = None):
content = ""
try:
samples = self.data[self.selected_step_index].get("samples", [])
content_dict_full = samples[self.selected_sample_index]
# Dynamically track any NEW keys that appear and add them to the field filter.
self._update_fields_select(content_dict_full.keys())
# Apply field selection filter (only show selected fields)
content_dict = {k: v for k, v in content_dict_full.items() if k in self.fields_select.selected}
if self.render_table:
content = Table("key", "value", show_lines=True)
for k in content_dict:
v = content_dict[k]
v = f"{v}"
content.add_row(
k,
self.highlighter(highlight_keyword(v, search_keyword)),
)
else:
text = Text()
for k in content_dict:
v = content_dict[k]
s = center_word_with_equals_exactly(k, 64) + f"\n{v}\n"
text.append(highlight_keyword(s, search_keyword))
content = self.highlighter(text)
except KeyError:
content = f"Loading data asynchronously, progress: {len(self.data)}/{self.step_num} step"
except Exception:
content = self.highlighter(traceback.format_exc())
self.content_display.update(content)
# ---------------------------------------------------------------------
# Request-ID jump logic
# ---------------------------------------------------------------------
@on(Input.Submitted, "#reqid-box")
async def on_reqid_submitted(self, event: Input.Submitted) -> None:
"""Jump to the sample that has a matching `request_id`."""
req_id_raw = event.value.strip()
# Remove hyphens so search is tolerant to different id formats
req_id = req_id_raw.replace("-", "")
if not req_id:
return
found = False
for step_idx, step_data in self.data.items():
for sample in step_data.get("samples", []):
sample_id = str(sample.get("request_id", ""))
if sample_id.replace("-", "") == req_id:
# Update selected indices
self.selected_step_index = step_idx
self.step_select.value = step_idx
# Ensure sample list is updated and select sample
self.update_result_options(offset=sample[INDEX_KEY])
self.selected_sample_index = sample[INDEX_KEY]
self.sample_select.value = sample[INDEX_KEY]
await self._clear_search()
await self.update_content()
found = True
break
if found:
break
if not found:
self.search_status.update(Text(f"request_id '{req_id_raw}' not found", style="bold red"))
else:
# Keep the typed id in the input box so users see what was searched.
pass
# ---------------------------------------------------------------------
# Helper: add new fields to SelectionList on-the-fly
# ---------------------------------------------------------------------
def _update_fields_select(self, keys):
"""Add any unseen *keys* to the field-selection widget so they can be toggled.
The viewer is often launched with only the first step loaded. Later steps may
introduce new columns (e.g. `request_id`). This helper ensures those fields
become visible without requiring a restart.
"""
# Ensure we have the widget (only after on_mount)
if not hasattr(self, "fields_select"):
return
for k in keys:
if k not in self._field_set:
self._field_set.add(k)
try:
# By default, new fields are selected so they appear immediately.
self.fields_select.add_option(k, k, selected=True)
except Exception:
# Fallback for older textual versions where signature is different.
self.fields_select.add_option((k, k, True))
@on(Select.Changed, "#step-select")
async def step_changed(self, event):
self.selected_step_index = event.value
self.update_result_options()
await self.update_content()
@on(Select.Changed, "#sample-select")
async def sample_changed(self, event):
self.selected_sample_index = event.value
await self._clear_search()
await self.update_content()
@on(Select.Changed, "#sample-sort")
async def sort_changed(self, event):
v = event.value
self.update_result_options(sort_desc=None if v == 0 else False if v == 1 else True)
await self.update_content()
@on(SelectionList.SelectedChanged, "#fields-select")
async def fields_changed(self, event):
await self.update_content()
@on(SelectionList.SelectedChanged, "#fields-select-all")
async def fields_all_changed(self, event):
s = self.query_one("#fields-select-all", SelectionList)
if s.selected:
self.fields_select.select_all()
else:
self.fields_select.deselect_all()
def action_focus_previous(self):
self.screen.focus_previous()
def action_focus_next(self):
self.screen.focus_next()
async def action_next_step(self) -> None:
self.selected_step_index += 1
if self.selected_step_index >= self.step_num:
self.selected_step_index = 0
self.step_select.value = self.selected_step_index
self.update_result_options()
await self.update_content()
async def action_next_sample(self) -> None:
self.selected_sample_index += 1
if not self.sample_num or self.selected_sample_index >= self.sample_num:
self.selected_sample_index = 0
self.sample_select.value = self.selected_sample_index
await self._clear_search()
await self.update_content()
async def action_previous_step(self) -> None:
self.selected_step_index -= 1
if self.selected_step_index < 0:
self.selected_step_index = self.step_num - 1
self.step_select.value = self.selected_step_index
self.update_result_options()
await self.update_content()
async def action_previous_sample(self) -> None:
self.selected_sample_index -= 1
if self.selected_sample_index < 0:
self.selected_sample_index = self.sample_num - 1
self.sample_select.value = self.selected_sample_index
await self._clear_search()
await self.update_content()
async def action_switch_render(self):
self.render_table = not self.render_table
await self.update_content()
def action_toggle_search(self) -> None:
self.search_box.focus()
async def action_cancel_search(self) -> None:
self.search_box.value = ""
await self._clear_search()
await self.update_content()
async def _clear_search(self):
self.matches = []
self.search_status.update("")
self.current_match_index = 0
@on(Input.Submitted, "#search-box")
async def on_search_submitted(self, event: Input.Submitted) -> None:
self.matches = []
self.current_match_index = 0
if event.value:
await self.update_content(event.value)
renderable = self.content_display.render()
if isinstance(renderable, Table):
return
assert isinstance(renderable, Text)
console = self.content_display._console
lines = renderable.wrap(console, self.scroll_view.container_size.width)
line_idx_recorded = set()
for line_idx, line in enumerate(lines):
if line_idx in line_idx_recorded:
continue
if event.value in line:
self.matches.append(
{
"line": line_idx,
"word": event.value,
}
)
line_idx_recorded.add(line_idx)
self.scroll_view.focus()
await self.action_next_search()
async def action_next_search(self) -> None:
if not self.matches or self.current_match_index >= len(self.matches):
return
target_line = self.matches[self.current_match_index]["line"]
self.scroll_view.scroll_to(x=0, y=target_line, animate=False)
self.current_match_index = (self.current_match_index + 1) % len(self.matches)
self.search_status.update(
Text(
f"Find :{self.current_match_index + 1}/{len(self.matches)}",
style="bold on #8f51b5",
)
)
def action_page_up(self):
self.scroll_view.scroll_page_up(animate=False)
def action_page_down(self):
self.scroll_view.scroll_page_down(animate=False)
def action_page_home(self):
self.scroll_view.scroll_home(animate=False)
def action_page_end(self):
self.scroll_view.scroll_end(animate=False)
async def _run(path: Path, mask_str: str):
assert path.exists(), f"{path} not exist"
paths = list(path.glob(f"*{FILE_SUFFIX}"))
paths = sorted(paths, key=lambda x: int(x.stem))
if not paths:
raise ValueError(f"no available reward dump files under {path}")
print(f"get jsonl file nums: {len(paths)}")
pbar = ProgressBar(total=len(paths), name="data load progress")
data = {}
await load_path(paths[0], data, mask_str, 0, pbar)
app = JsonLineViewer(step_num=len(paths), data=data, pbar=pbar)
await asyncio.gather(load_dir(path, data, pbar, mask_str), app.run_async())
app = typer.Typer()
@app.command(help="launch TUI APP")
def run(
rollout_data_dir: Path,
mask_str: Annotated[str, typer.Option(help="string that will be masked to *")] = r"<\|image_pad\|>|<\|imgpad\|>",
):
loop = asyncio.get_event_loop()
loop.run_until_complete(_run(rollout_data_dir, mask_str))
if __name__ == "__main__":
app()
|
scripts__rollout_viewer.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge individual MoE expert weights into stacked tensors for efficient loading.
This script takes a HuggingFace checkpoint with individual expert weights
(e.g., model.layers.{i}.mlp.experts.{j}.gate_proj.weight) and merges them
into stacked tensors (e.g., model.layers.{i}.mlp.experts.gate_proj) for
faster loading and better memory efficiency in VeOmni.
The merging process:
1. Loads individual expert weights from the HF checkpoint
2. Stacks them into single tensors for each projection type
3. Handles all three projection types: gate_proj, up_proj, down_proj
4. Supports both Qwen3-MoE (num_experts) and DeepSeek (n_routed_experts) formats
5. Handles models with initial dense layers (first_k_dense_replace)
Usage: python moe_merge.py --raw_hf_path <input_checkpoint> --merge_hf_path <output_dir>
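A minimal stacking sketch with toy shapes (illustrative only, not the real checkpoint):
N per-expert weights of shape [out, in] become one tensor of shape [N, out, in], preserving expert order.
>>> import torch
>>> experts = [torch.randn(4, 8) for _ in range(3)]
>>> torch.stack(experts).shape
torch.Size([3, 4, 8])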
"""
import os
from argparse import ArgumentParser
from dataclasses import dataclass
from glob import glob
from typing import Generator
import torch
from safetensors.torch import safe_open
from tqdm import tqdm
from transformers import AutoConfig
from veomni.models import build_tokenizer, save_model_weights
@dataclass
class StateDictIterator:
filepath: str
def __iter__(self) -> Generator[tuple[str, "torch.Tensor"], None, None]:
if self.filepath.endswith(".safetensors"):
with safe_open(self.filepath, framework="pt", device="cpu") as f:
for key in f.keys():
yield key, f.get_tensor(key)
else:
state_dict = torch.load(self.filepath, map_location="cpu", weights_only=True, mmap=True)
for key in state_dict.keys():
yield key, state_dict[key]
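# Usage sketch (the shard filename below is illustrative): tensors are yielded lazily,
# one at a time, on CPU.
#   for name, tensor in StateDictIterator("model-00001-of-00009.safetensors"):
#       ...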
def main(raw_hf_path, merge_hf_path):
torch.set_default_dtype(torch.bfloat16)
os.makedirs(merge_hf_path, exist_ok=True)
config = AutoConfig.from_pretrained(raw_hf_path)
tokenizer = build_tokenizer(raw_hf_path)
safetensor_files = list(glob(os.path.join(raw_hf_path, "*.safetensors")))
safetensor_files.sort()
state_dict_iterators = [StateDictIterator(shard_file) for shard_file in safetensor_files]
new_state_dict = {}
for state_dict_iterator in tqdm(state_dict_iterators, desc="Loading checkpoint shards"):
for name, tensor in state_dict_iterator:
new_state_dict[name] = tensor.cpu()
print(new_state_dict.keys())
if hasattr(config, "num_experts"):
# qwen3moe
num_experts = config.num_experts
elif hasattr(config, "n_routed_experts"):
# deepseek
num_experts = config.n_routed_experts
else:
raise RuntimeError("could not determine the number of experts from the model config")
num_hidden_layers = config.num_hidden_layers
if hasattr(config, "first_k_dense_replace"):
# deepseek first k dense layer
moe_layer_start_idx = config.first_k_dense_replace
else:
# every layer in the model is an MoE layer
moe_layer_start_idx = 0
for i in range(moe_layer_start_idx, num_hidden_layers):
gate_proj = []
for j in range(num_experts):
gate_proj.append(new_state_dict.pop(f"model.layers.{i}.mlp.experts.{j}.gate_proj.weight"))
new_state_dict[f"model.layers.{i}.mlp.experts.gate_proj"] = torch.stack(gate_proj)
up_proj = []
for j in range(num_experts):
up_proj.append(new_state_dict.pop(f"model.layers.{i}.mlp.experts.{j}.up_proj.weight"))
new_state_dict[f"model.layers.{i}.mlp.experts.up_proj"] = torch.stack(up_proj)
down_proj = []
for j in range(num_experts):
down_proj.append(new_state_dict.pop(f"model.layers.{i}.mlp.experts.{j}.down_proj.weight"))
new_state_dict[f"model.layers.{i}.mlp.experts.down_proj"] = torch.stack(down_proj)
model_assets = [config, tokenizer]
save_model_weights(merge_hf_path, new_state_dict, model_assets=model_assets)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--raw_hf_path", type=str, required=True)
parser.add_argument("--merge_hf_path", type=str, required=True)
args = parser.parse_args()
main(args.raw_hf_path, args.merge_hf_path)
|
scripts__veomni__moe_merge.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reverse process of moe_merge.py - splits merged MoE expert weights back to individual experts.
This script takes a HF checkpoint that has been processed by moe_merge.py (where expert weights
are stacked into single tensors) and splits them back to the original format with individual
expert weights.
The process reverses the merging by:
1. Loading stacked tensors like model.layers.{i}.mlp.experts.gate_proj
2. Unstacking them back to individual experts model.layers.{i}.mlp.experts.{j}.gate_proj.weight
3. Handling all three projection types: gate_proj, up_proj, down_proj
Usage: python moe_split.py --merge_hf_path <merged_checkpoint> --split_hf_path <output_dir>
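A minimal unstacking sketch with toy shapes (illustrative only): indexing the stacked tensor recovers each expert weight.
>>> import torch
>>> stacked = torch.stack([torch.randn(4, 8) for _ in range(3)])
>>> stacked[1].shape
torch.Size([4, 8])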
"""
import os
from argparse import ArgumentParser
from dataclasses import dataclass
from glob import glob
from typing import Generator
import torch
from safetensors.torch import safe_open
from tqdm import tqdm
from transformers import AutoConfig
from veomni.models import build_tokenizer, save_model_weights
@dataclass
class StateDictIterator:
filepath: str
def __iter__(self) -> Generator[tuple[str, "torch.Tensor"], None, None]:
if self.filepath.endswith(".safetensors"):
with safe_open(self.filepath, framework="pt", device="cpu") as f:
for key in f.keys():
yield key, f.get_tensor(key)
else:
state_dict = torch.load(self.filepath, map_location="cpu", weights_only=True, mmap=True)
for key in state_dict.keys():
yield key, state_dict[key]
def main(merge_hf_path, split_hf_path):
torch.set_default_dtype(torch.bfloat16)
os.makedirs(split_hf_path, exist_ok=True)
config = AutoConfig.from_pretrained(merge_hf_path)
tokenizer = build_tokenizer(merge_hf_path)
safetensor_files = list(glob(os.path.join(merge_hf_path, "*.safetensors")))
safetensor_files.sort()
state_dict_iterators = [StateDictIterator(shard_file) for shard_file in safetensor_files]
new_state_dict = {}
for state_dict_iterator in tqdm(state_dict_iterators, desc="Loading checkpoint shards"):
for name, tensor in state_dict_iterator:
new_state_dict[name] = tensor.cpu()
num_experts = config.num_experts
num_hidden_layers = config.num_hidden_layers
for i in range(num_hidden_layers):
print(f"Converting layer {i}")
for proj_name in ["gate_proj", "up_proj", "down_proj"]:
stacked_key = f"model.layers.{i}.mlp.experts.{proj_name}"
if stacked_key in new_state_dict:
stacked_tensor = new_state_dict.pop(stacked_key)
for j in range(num_experts):
expert_key = f"model.layers.{i}.mlp.experts.{j}.{proj_name}.weight"
new_state_dict[expert_key] = stacked_tensor[j]
model_assets = [config, tokenizer]
print("Saving to safetensors")
save_model_weights(split_hf_path, new_state_dict, model_assets=model_assets)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--merge_hf_path", type=str, required=True)
parser.add_argument("--split_hf_path", type=str, required=True)
args = parser.parse_args()
main(args.merge_hf_path, args.split_hf_path)
|
scripts__veomni__moe_split.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# setup.py is the fallback installation script when pyproject.toml does not work
import os
from pathlib import Path
from setuptools import find_packages, setup
version_folder = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(version_folder, "verl/version/version")) as f:
__version__ = f.read().strip()
install_requires = [
"accelerate",
"codetiming",
"datasets",
"dill",
"hydra-core",
"numpy<2.0.0",
"pandas",
"peft",
"pyarrow>=19.0.0",
"pybind11",
"pylatexenc",
"ray[default]>=2.41.0",
"torchdata",
"tensordict>=0.8.0,<=0.10.0,!=0.9.0",
"transformers",
"wandb",
"packaging>=20.0",
"tensorboard",
]
TEST_REQUIRES = ["pytest", "pre-commit", "py-spy", "pytest-asyncio", "pytest-rerunfailures"]
PRIME_REQUIRES = ["pyext"]
GEO_REQUIRES = ["mathruler", "torchvision", "qwen_vl_utils"]
GPU_REQUIRES = ["liger-kernel", "flash-attn"]
MATH_REQUIRES = ["math-verify"] # Add math-verify as an optional dependency
VLLM_REQUIRES = ["tensordict>=0.8.0,<=0.10.0,!=0.9.0", "vllm>=0.8.5,<=0.12.0"]
TRTLLM_REQUIRES = ["tensorrt-llm>=1.2.0rc6"]
SGLANG_REQUIRES = [
"tensordict>=0.8.0,<=0.10.0,!=0.9.0",
"sglang[srt,openai]==0.5.6",
"torch==2.9.1",
]
TRL_REQUIRES = ["trl<=0.9.6"]
MCORE_REQUIRES = ["mbridge"]
TRANSFERQUEUE_REQUIRES = ["TransferQueue==0.1.5"]
extras_require = {
"test": TEST_REQUIRES,
"prime": PRIME_REQUIRES,
"geo": GEO_REQUIRES,
"gpu": GPU_REQUIRES,
"math": MATH_REQUIRES,
"vllm": VLLM_REQUIRES,
"sglang": SGLANG_REQUIRES,
"trl": TRL_REQUIRES,
"mcore": MCORE_REQUIRES,
"transferqueue": TRANSFERQUEUE_REQUIRES,
"trtllm": TRTLLM_REQUIRES,
}
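# The extras above map directly to pip install targets, e.g.:
#   pip install -e .[test]        # development / CI dependencies
#   pip install -e .[vllm,gpu]    # vLLM rollout plus GPU kernels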
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name="verl",
version=__version__,
package_dir={"": "."},
packages=find_packages(where="."),
url="https://github.com/volcengine/verl",
license="Apache 2.0",
author="Bytedance - Seed - MLSys",
author_email="zhangchi.usc1992@bytedance.com, gmsheng@connect.hku.hk",
description="verl: Volcano Engine Reinforcement Learning for LLM",
install_requires=install_requires,
extras_require=extras_require,
package_data={
"": ["version/*"],
"verl": ["trainer/config/*.yaml"],
},
include_package_data=True,
long_description=long_description,
long_description_content_type="text/markdown",
)
|
setup.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from dataclasses import FrozenInstanceError, dataclass, fields
from typing import Any
# BaseConfig class inherits from collections.abc.Mapping, which means it can act like a dictionary
@dataclass
class BaseConfig(collections.abc.Mapping):
"""The BaseConfig provides dict-like interface for a dataclass config.
By default, all fields in the config are immutable unless listed in
"_mutable_fields". The BaseConfig class implements the Mapping abstract base class.
This allows instances of this class to be used like dictionaries.
"""
_mutable_fields = set()
_target_: str = ""
def __setattr__(self, name: str, value):
"""Set the value of an attribute. Check if the attr is mutable before setting the value."""
# If the field already exists, it's considered frozen unless it's in _mutable_fields
if name in self.__dict__ and name not in getattr(self, "_mutable_fields", set()):
raise FrozenInstanceError(f"Field '{name}' is frozen and cannot be modified")
super().__setattr__(name, value)
def get(self, key: str, default: Any = None) -> Any:
"""Get the value associated with the given key. If the key does not exist, return the default value.
Args:
key (str): The attribute name to retrieve.
default (Any, optional): The value to return if the attribute does not exist. Defaults to None.
Returns:
Any: The value of the attribute or the default value.
"""
try:
return getattr(self, key)
except AttributeError:
return default
def __getitem__(self, key: str):
"""Implement the [] operator for the class. Allows accessing attributes like dictionary items.
Args:
key (str): The attribute name to retrieve.
Returns:
Any: The value of the attribute.
Raises:
AttributeError: If the attribute does not exist.
TypeError: If the key is not a string.
"""
return getattr(self, key)
def __iter__(self):
"""Implement the iterator protocol. Allows iterating over the attribute names of the instance.
Yields:
str: The name of each field in the dataclass.
"""
for f in fields(self):
yield f.name
def __len__(self):
"""
Return the number of fields in the dataclass.
Returns:
int: The number of fields in the dataclass.
"""
return len(fields(self))
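# A hedged usage sketch (the subclass below is illustrative, not part of verl; it reuses the
# dataclass decorator imported above):
#   >>> @dataclass
#   ... class MyConfig(BaseConfig):
#   ...     lr: float = 1e-3
#   ...     _mutable_fields = {"lr"}
#   >>> cfg = MyConfig()
#   >>> cfg["lr"], cfg.get("missing", 0.0)
#   (0.001, 0.0)
#   >>> cfg.lr = 5e-4       # allowed: "lr" is listed in _mutable_fields
#   >>> cfg._target_ = "x"  # raises FrozenInstanceError: the field is frozen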
|
verl__base_config.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from abc import ABC, abstractmethod
from typing import Any, Generator, TypedDict
import ray
import torch
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.ray_utils import auto_await
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout import BaseRollout, RolloutReplica, get_rollout_class
class TensorMeta(TypedDict):
name: str
shape: torch.Size
dtype: torch.dtype
offset: int
class CheckpointEngineRegistry:
"""Checkpoint engine registry."""
_registry: dict[str, type["CheckpointEngine"]] = {}
def register(backend: str):
"""Register a checkpoint engine.
Args:
backend: The backend of the checkpoint engine.
"""
def wrapper(cls: type["CheckpointEngine"]):
CheckpointEngineRegistry._registry[backend] = cls
return cls
return wrapper
@classmethod
def get(cls, backend: str) -> type["CheckpointEngine"]:
"""Get the checkpoint engine class.
Args:
backend: The backend of the checkpoint engine.
Returns:
The checkpoint engine class.
"""
return cls._registry[backend]
@classmethod
def new(cls, backend: str, *args, **kwargs) -> "CheckpointEngine":
"""Create a new checkpoint engine instance.
Args:
backend: The backend of the checkpoint engine.
*args: Variable length arguments passed to the checkpoint engine constructor.
**kwargs: Arbitrary keyword arguments passed to the checkpoint engine constructor.
Returns:
A new checkpoint engine instance.
"""
if backend not in cls._registry:
raise ValueError(f"Checkpoint engine {backend} not registered")
return cls._registry[backend](*args, **kwargs)
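# Registration / construction sketch (the backend name below is illustrative):
#   @CheckpointEngineRegistry.register("my_backend")
#   class MyCheckpointEngine(CheckpointEngine): ...
#   engine = CheckpointEngineRegistry.new("my_backend", bucket_size=1 << 30)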
class CheckpointEngine(ABC):
"""CheckpointEngine is an abstraction to transfer weights from trainer to rollout.
In trainer process:
>>> trainer = EngineRegistry.new(...) # FSDP, Megatron, VeOmni, TorchTitan, ...
>>> engine = CheckpointEngine.new(...) # NCCLCheckpointEngine, NIXLCheckpointEngine, ...
>>> await engine.send_weights(trainer.get_per_tensor_param())
In rollout process:
>>> engine = CheckpointEngine.new(...)
>>> server_adapter = ServerAdapter()
>>> await server_adapter.update_weights(engine.receive_weights()) # update weights via cuda ipc
"""
@abstractmethod
def prepare(self) -> dict[str, Any]:
"""Prepare checkpoint engine before each step send_weights/receive_weights.
1. Allocate weight bucket.
2. [Optional] Register weight bucket for RDMA.
3. Return metadata used to build the communication topology: master ip:port, registered RDMA descriptors, etc.
Returns:
A dictionary that contains the metadata of this worker.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_topology(
cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]
) -> tuple[dict[str, list[Any]], dict[str, list[Any]]]:
"""Build communication topology between all workers.
Args:
trainer_world_size: The world size of the trainer worker group.
rollout_world_size: The world size of the rollout replica.
metadata: A list of metadata `prepare` from all workers.
Returns:
A tuple of two dictionaries that contains the communication topology for trainer and rollout worker group.
Each dict value should be a list whose length equals the world size of the corresponding worker group; these
lists are dispatched to `init_process_group`.
```
world_size = rollout.world_size + trainer.world_size
kwargs = {
"rank": list(range(world_size)),
"world_size": [world_size] * world_size,
"master_metadata": [metadata[0]] * world_size,
}
```
"""
raise NotImplementedError
@abstractmethod
def init_process_group(self, **kwargs):
"""Init process group for checkpoint engine.
Args:
**kwargs: Keyword arguments from `build_topology`.
"""
raise NotImplementedError
@abstractmethod
def finalize(self):
"""Finalize checkpoint engine after each step send_weights/receive_weights.
1. Free weight bucket.
2. [Optional] Deregister weight bucket for RDMA.
3. [Optional] Destroy process group.
"""
raise NotImplementedError
@abstractmethod
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
@abstractmethod
async def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
class CheckpointEngineWithCache(CheckpointEngine):
"""Checkpoint engine with a local cache (shm, disk, etc.). This allows weights to be synchronized without interrupting
ongoing rollout requests (partial rollout). Once the in-flight requests are exhausted, rollout can fetch the weights from the local cache.
Laminar: https://arxiv.org/abs/2510.12633
"""
@abstractmethod
async def get_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Get the weights of the model from local cache.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
raise NotImplementedError
@CheckpointEngineRegistry.register("naive")
class ColocatedCheckpointEngine(CheckpointEngine):
"""Checkpoint engine for trainer and rollout colocated on same GPU.
In trainer process:
>>> engine = ColocatedCheckpointEngine()
>>> trainer = Trainer()
>>> server_adapter = ServerAdapter()
>>> engine.send_weights(trainer.get_per_tensor_param())
>>> server_adapter.update_weights(engine.receive_weights())
"""
def __init__(self, bucket_size: int, is_master: bool = False) -> None:
self.bucket_size = bucket_size
self.is_master = is_master
def prepare(self):
raise NotImplementedError
def init_process_group(self, **kwargs):
raise NotImplementedError
def finalize(self):
raise NotImplementedError
@classmethod
def build_topology(cls, *args, **kwargs):
raise NotImplementedError
def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
self.weights = weights
def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
yield from self.weights
self.weights = None
class CheckpointEngineWorker(Worker):
"""CheckpointEngineWorker colocated with inference engine's WorkerProc on same GPU.
Args:
rollout_config: The rollout configuration.
model_config: The model configuration.
server_adapter: The server adapter to update weights.
"""
def __init__(
self,
rollout_config: RolloutConfig,
model_config: HFModelConfig,
server_adapter: BaseRollout = None,
) -> None:
self.rollout_config = rollout_config
self.model_config = model_config
# sglang and trt-llm need device_mesh for internal communication
initialize_global_process_group_ray(timeout_second=None, backend="cpu:gloo")
self.server_adapter: BaseRollout = server_adapter or get_rollout_class(
rollout_config.name, rollout_config.mode
)(config=rollout_config, model_config=model_config, device_mesh=None)
backend = rollout_config.checkpoint_engine.backend
bucket_size = rollout_config.checkpoint_engine.update_weights_bucket_megabytes << 20
engine_kwargs = rollout_config.checkpoint_engine.engine_kwargs.get(backend, {})
self.checkpoint_engine = CheckpointEngineRegistry.new(backend, bucket_size=bucket_size, **engine_kwargs)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
async def update_weights(self):
weights = self.checkpoint_engine.receive_weights()
await self.server_adapter.update_weights(weights)
@register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
def execute_checkpoint_engine(self, method: str, *args, **kwargs):
return getattr(self.checkpoint_engine, method)(*args, **kwargs)
_worker_cls = ray.remote(CheckpointEngineWorker)
class CheckpointEngineManager:
"""Checkpoint engine manager to coordinate weight synchronization between trainer and rollout replicas.
- ME: model engine (FSDP, MCore, VeOmni, ...), which exports the full-tensor generator `get_per_tensor_param`
- CE: checkpoint engine (NCCL, NIXL, etc.)
In the trainer, the model engine and the checkpoint engine run in the same process.
In rollout, the checkpoint engine and the rollout worker run in separate processes and update weights via cuda ipc.
```
┌────────┬────────┬─────┬────────┐ ┌───────────────────┬───────────────────┐
│ ┌────┐ │ ┌────┐ │ │ ┌────┐ │ │ Replica 0 │ Replica 1 │
│ │ ME0│ │ │ ME1│ │ │ │ MEn│ │ ├────┬────┬────┬────┼────┬────┬────┬────┤
│ └──┬─┘ │ └────┘ │ ... │ └────┘ │ │ 0 │ 1 │ 2 │ 3 │ 0 │ 1 │ 2 │ 3 │
│ v | | | | └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
| ┌──┴─┐ │ ┌────┐ │ │ ┌────┐ │ ^ ^ ^ cuda ipc ^ ^ ^
│ │ CE │ │ │ CE │ │ │ │ CE │ │ ┌──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┐
│ └──┬─┘ │ └────┘ │ │ └────┘ │ │ CE │ CE │ CE │ CE │ CE │ CE │ CE │ CE |
└────┼───┴────────┴─────┴────────┘ └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
v | | | | | | | |
└─────────────(nccl/nixl/..)─────────────┴────┴────┴────┴────┴────┴────┴────┘
```
Args:
backend: The checkpoint engine backend.
trainer: The trainer worker group.
replicas: The list of rollout replicas.
"""
def __init__(
self,
backend: str,
trainer: RayWorkerGroup,
replicas: list[RolloutReplica],
) -> None:
self.backend = backend
self.backend_cls = CheckpointEngineRegistry.get(backend)
self.trainer = trainer
self.replicas = replicas
def build_process_group(self, rollout: RayWorkerGroup):
"""Build process group for trainer and rollout replicas."""
trainer = self.trainer
# 1. prepare all workers
metadata = ray.get(
trainer.execute_checkpoint_engine(["prepare"] * trainer.world_size)
+ rollout.execute_checkpoint_engine(["prepare"] * rollout.world_size)
)
# 2. build communication topology between all workers
trainer_kwargs, rollout_kwargs = self.backend_cls.build_topology(
trainer.world_size, rollout.world_size, metadata
)
for k, v in trainer_kwargs.items():
assert len(v) == trainer.world_size, f"trainer_kwargs[{k}] must have length of {trainer.world_size}"
for k, v in rollout_kwargs.items():
assert len(v) == rollout.world_size, f"rollout_kwargs[{k}] must have length of {rollout.world_size}"
trainer_kwargs["method"] = ["init_process_group"] * trainer.world_size
rollout_kwargs["method"] = ["init_process_group"] * rollout.world_size
# 3. init process group between all workers
ray.get(
trainer.execute_checkpoint_engine(**trainer_kwargs) + rollout.execute_checkpoint_engine(**rollout_kwargs)
)
def add_replicas(self, replicas: list[RolloutReplica]):
"""Add rollout replicas to the manager for elastic scale-up; the process group will be rebuilt.
Args:
replicas: The list of rollout replicas to add.
"""
self.replicas.extend(replicas)
def remove_replicas(self, replicas: list[RolloutReplica]):
"""Remove rollout replicas from the manager for elastic scale-down; the process group will be rebuilt.
Args:
replicas: The list of rollout replicas to remove.
"""
replicas_set = set(replicas)
self.replicas = [r for r in self.replicas if r not in replicas_set]
@auto_await
async def sleep_replicas(self):
"""Sleep all rollout replicas: free weight and kv_cache device memory."""
# skip sleep replicas for disaggregated rollout
if self.backend != "naive":
return
await asyncio.gather(*[r.sleep() for r in self.replicas])
@auto_await
async def update_weights(self):
"""Update weights from trainer to rollout replicas."""
# 0. update weights for sync training with colocated trainer and rollout
if self.backend == "naive":
ray.get(self.trainer.update_weights())
return
# 1. abort and save all unfinished requests for partial rollout
await asyncio.gather(*[r.abort_all_requests() for r in self.replicas])
# 2. create a temporary worker group for all replicas
workers = []
for replica in self.replicas:
workers.extend(replica.workers)
rollout = RayWorkerGroup(worker_handles=workers, ray_cls_with_init=RayClassWithInitArgs(cls=_worker_cls))
trainer = self.trainer
# 3. build process group
self.build_process_group(rollout)
# 4. update weights of all workers
ray.get(trainer.update_weights() + rollout.update_weights())
# 5. finalize all workers
ray.get(
trainer.execute_checkpoint_engine(["finalize"] * trainer.world_size)
+ rollout.execute_checkpoint_engine(["finalize"] * rollout.world_size)
)
# 6. resume all unfinished requests for partial rollout
await asyncio.gather(*[r.resume_all_requests() for r in self.replicas])
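# Hedged usage sketch (not part of the original file). `Manager` stands for the class defined
# above (its real name sits outside this excerpt), and `trainer_wg` / `replicas` are
# hypothetical objects created by the training entrypoint:
#
#   manager = Manager(backend="nccl", trainer=trainer_wg, replicas=replicas)
#   for _ in range(num_steps):
#       ...  # run one training step
#       manager.update_weights()   # broadcast fresh weights to every rollout worker
#   manager.sleep_replicas()       # frees rollout GPU memory only for the colocated "naive" backend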
|
verl__checkpoint_engine__base.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
import ray
import torch
import zmq
from vllm.distributed.utils import StatelessProcessGroup
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.distributed import stateless_init_process_group
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class MasterMetadata:
zmq_ip: str
zmq_port: int
dist_ip: str
dist_port: int
class BroadcastOperation:
"""Async broadcast operation with HCCL in separate thread.
Args:
rank (int): The rank of the current process.
process_group (StatelessProcessGroup): The process group used for the HCCL broadcast.
bucket (torch.Tensor): The tensor to broadcast.
metadata (dict[str, TensorMeta]): The metadata of the tensor.
socket (zmq.Socket): The zeromq socket to communicate with master.
topic (str): The topic to subscribe.
"""
def __init__(
self,
rank: int,
process_group: StatelessProcessGroup | str,
bucket: torch.Tensor,
metadata: dict[str, TensorMeta],
socket: zmq.Socket,
topic: str,
) -> None:
self.rank = rank
self.pyhccl = process_group
self.bucket = bucket
self.metadata = metadata
self.socket = socket
self.topic = topic
loop = asyncio.get_running_loop()
self._task = loop.run_in_executor(None, self._run)
def _run(self):
# broadcast tensor meta via zeromq PUB/SUB
if self.rank == 0:
self.socket.send_string(self.topic, flags=zmq.SNDMORE)
self.socket.send_pyobj(self.metadata)
else:
self.socket.recv_string()
self.metadata = self.socket.recv_pyobj()
# broadcast tensor via HCCL
self.pyhccl.broadcast(self.bucket, src=0)
async def wait_for_complete(self) -> dict[str, TensorMeta]:
"""Wait for the broadcast operation to complete.
Returns:
dict[str, TensorMeta]: The bucket meta after broadcast.
"""
await self._task
return self.metadata
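# Illustrative usage (not part of the original file): the sender (rank 0) passes the bucket
# metadata it wants to publish, receivers pass metadata=None and get it back from
# wait_for_complete() once both the zeromq message and the HCCL broadcast have finished.
#
#   op = BroadcastOperation(rank=rank, process_group=pyhccl, bucket=buf,
#                           metadata=meta_or_none, socket=socket, topic="bucket_metadata")
#   meta = await op.wait_for_complete()   # inside an async function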
@CheckpointEngineRegistry.register("hccl")
class HCCLCheckpointEngine(CheckpointEngine):
"""HCCL checkpoint engine with collective communication.
Args:
bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
two buffers to send and receive weights at the same time, so the device memory overhead is 2 * bucket_size.
group_name (str): The name of the HCCL process group. Defaults to "default".
rebuild_group (bool): Whether to rebuild the HCCL process group in each update. Defaults to False.
is_master (bool): Whether the current process is the master process. Defaults to False.
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
def __init__(
self,
bucket_size: int,
group_name: str = "default",
rebuild_group: bool = False,
is_master: bool = False,
rollout_dtype: torch.dtype = torch.bfloat16,
) -> None:
self.bucket_size = bucket_size
self.group_name = group_name
self.rebuild_group = rebuild_group
self.rollout_dtype = rollout_dtype
self.pyhccl = None
self.device = torch.npu.current_device()
# start zeromq server for broadcasting bucket tensor metadata
self.is_master = is_master
self.topic = "bucket_metadata"
if self.is_master:
self._start_zmq_server()
self.dist_port, _ = get_free_port(self.ip)
def prepare(self) -> MasterMetadata:
self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="npu")
self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="npu")
return (
MasterMetadata(zmq_ip=self.ip, zmq_port=self.zmq_port, dist_ip=self.ip, dist_port=self.dist_port)
if self.is_master
else None
)
def finalize(self):
"""Destroy the HCCL process group if rebuild_group is True."""
if self.rebuild_group:
if self.rank >= 0:
self.pyhccl.destroyComm(self.pyhccl.comm)
self.pyhccl = None
self.rank = None
self.world_size = None
self.send_buf = None
self.recv_buf = None
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"master_metadata": [metadata[0]] * trainer_world_size,
}
rollout_kwargs = {
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"master_metadata": [metadata[0]] * rollout_world_size,
}
return trainer_kwargs, rollout_kwargs
def _start_zmq_server(self):
self.ip = ray.util.get_node_ip_address().strip("[]")
self.zmq_port, self.listen_sock = get_free_port(self.ip)
context = zmq.Context()
self.socket = context.socket(zmq.PUB)
if is_valid_ipv6_address(self.ip):
address = f"tcp://[{self.ip}]:{self.zmq_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{self.ip}:{self.zmq_port}"
self.socket.bind(address)
def _connect_zmq_client(self, metadata: MasterMetadata):
assert not self.is_master, "Master process should not connect to other processes."
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
if is_valid_ipv6_address(metadata.zmq_ip):
address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
self.socket.connect(address)
self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
def init_process_group(self, rank: int, world_size: int, master_metadata: MasterMetadata):
"""Initialize the HCCL process group.
Args:
rank (int): The rank of the current process.
world_size (int): The total number of processes.
"""
# For trainer workers other than rank 0, their rank should be -1.
if rank < 0:
self.rank = rank
self.world_size = world_size
return
if self.rebuild_group or self.pyhccl is None:
self.pyhccl = stateless_init_process_group(
master_metadata.dist_ip, master_metadata.dist_port, rank, world_size, self.device
)
self.rank = rank
self.world_size = world_size
else:
assert self.rank == rank, f"rank {rank} is not equal to self.rank {self.rank}"
assert self.world_size == world_size, (
f"world_size {world_size} is not equal to self.world_size {self.world_size}"
)
if self.rank > 0:
self._connect_zmq_client(master_metadata)
# barrier
signal = torch.tensor([1], dtype=torch.int8, device=torch.npu.current_device())
self.pyhccl.all_reduce(signal)
logger.info(f"init_process_group rank: {self.rank}, world_size: {self.world_size}")
@torch.no_grad()
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
assert self.rank <= 0, "Only trainer workers (rank <= 0) may call send_weights."
# For trainer ranks other than 0, consume weights without sending.
if self.rank < 0:
for name, weight in weights:
pass
return
send_buf, recv_buf = self.send_buf, self.recv_buf
broadcast_op = None
start_time = time.time()
bucket_meta: dict[str, TensorMeta] = {}
offset = 0
for name, weight in weights:
# fill the tensor bucket
if offset + weight.nbytes > self.bucket_size:
torch.npu.synchronize()
# wait previous broadcast op finish
if broadcast_op is not None:
await broadcast_op.wait_for_complete()
broadcast_op = BroadcastOperation(
rank=self.rank,
process_group=self.pyhccl,
bucket=send_buf,
metadata={"bucket_meta": bucket_meta, "is_last": False},
socket=self.socket,
topic=self.topic,
)
# swap send_buf and recv_buf
send_buf, recv_buf = recv_buf, send_buf
bucket_meta = {}
offset = 0
assert offset + weight.nbytes <= self.bucket_size, (
f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
)
bucket_meta[name] = {
"name": name,
"shape": weight.shape,
"dtype": weight.dtype,
"offset": offset,
}
send_buf[offset : offset + weight.nbytes] = weight.view(-1).view(torch.uint8)
offset += weight.nbytes
# broadcast last bucket
torch.npu.synchronize()
if broadcast_op is not None:
await broadcast_op.wait_for_complete()
broadcast_op = BroadcastOperation(
rank=self.rank,
process_group=self.pyhccl,
bucket=send_buf,
metadata={"bucket_meta": bucket_meta, "is_last": True},
socket=self.socket,
topic=self.topic,
)
await broadcast_op.wait_for_complete()
logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
@torch.no_grad()
async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
assert self.rank > 0, "Rank 0 should not receive weights."
send_buf, recv_buf = self.send_buf, self.recv_buf
total_bytes, total_params = 0, 0
# receive first bucket
start_time = time.time()
broadcast_op = BroadcastOperation(
rank=self.rank,
process_group=self.pyhccl,
bucket=recv_buf,
metadata=None,
socket=self.socket,
topic=self.topic,
)
metadata = await broadcast_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# swap send_buf and recv_buf
send_buf, recv_buf = recv_buf, send_buf
while not metadata["is_last"]:
# 1. receive next bucket
broadcast_op = BroadcastOperation(
rank=self.rank,
process_group=self.pyhccl,
bucket=recv_buf,
metadata=None,
socket=self.socket,
topic=self.topic,
)
# 2. yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# 3. wait for next bucket broadcast finish
metadata = await broadcast_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# 4. swap send_buf and recv_buf
torch.npu.synchronize() # sync non-blocking copy
send_buf, recv_buf = recv_buf, send_buf
# yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
time_cost = time.time() - start_time
bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
logger.info(
f"Rank {self.rank} receive weights done, total_params: {total_params}, "
f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
)
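# Hedged end-to-end sketch (not part of the original file) of how the engine is used on the
# two sides. Trainer rank 0 drives send_weights with a (name, tensor) generator; each rollout
# worker consumes receive_weights. The yielded tensor is a view into the staging buffer, so
# consumers typically copy it into the inference engine before advancing the generator.
#
#   # trainer rank 0
#   await engine.send_weights((n, p.detach()) for n, p in model.named_parameters())
#
#   # rollout worker
#   async for name, tensor in engine.receive_weights():
#       load_weight(name, tensor)   # hypothetical loader on the inference engine side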
|
verl__checkpoint_engine__hccl_checkpoint_engine.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
from unittest.mock import patch
with patch("importlib.metadata.distributions", return_value=[]):
import cupy as cp
import ray
import ray.util.collective as collective
import torch
import zmq
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class MasterMetadata:
zmq_ip: str
zmq_port: int
class BroadcastOperation:
"""Async broadcast operation with NCCL in separate thread.
Args:
rank (int): The rank of the current process.
group_name (str): The name of the NCCL process group.
bucket (cp.ndarray | torch.Tensor): The tensor to broadcast.
metadata (dict[str, TensorMeta]): The metadata of the tensor.
socket (zmq.Socket): The zeromq socket to communicate with master.
topic (str): The topic to subscribe.
"""
def __init__(
self,
rank: int,
group_name: str,
bucket: cp.ndarray | torch.Tensor,
metadata: dict[str, TensorMeta],
socket: zmq.Socket,
topic: str,
) -> None:
self.rank = rank
self.group_name = group_name
self.bucket = bucket
self.metadata = metadata
self.socket = socket
self.topic = topic
loop = asyncio.get_running_loop()
self._task = loop.run_in_executor(None, self._run)
def _run(self):
# broadcast tensor meta via zeromq PUB/SUB
if self.rank == 0:
self.socket.send_string(self.topic, flags=zmq.SNDMORE)
self.socket.send_pyobj(self.metadata)
else:
self.socket.recv_string()
self.metadata = self.socket.recv_pyobj()
# broadcast tensor via NCCL
collective.broadcast(self.bucket, src_rank=0, group_name=self.group_name)
async def wait_for_complete(self) -> dict[str, TensorMeta]:
"""Wait for the broadcast operation to complete.
Returns:
dict[str, TensorMeta]: The bucket meta after broadcast.
"""
await self._task
return self.metadata
@CheckpointEngineRegistry.register("nccl")
class NCCLCheckpointEngine(CheckpointEngine):
"""NCCL checkpoint engine with collective communication.
Args:
bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
two buffers to send and receive weights at the same time, so the device memory overhead is 2 * bucket_size.
group_name (str): The name of the NCCL process group. Defaults to "default".
rebuild_group (bool): Whether to rebuild the NCCL process group in each update. Defaults to False.
is_master (bool): Whether the current process is the master process. Defaults to False.
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
def __init__(
self,
bucket_size: int,
group_name: str = "default",
rebuild_group: bool = False,
is_master: bool = False,
rollout_dtype: torch.dtype = torch.bfloat16,
) -> None:
self.bucket_size = bucket_size
self.group_name = group_name
self.rebuild_group = rebuild_group
self.rollout_dtype = rollout_dtype
# start zeromq server for broadcasting bucket tensor metadata
self.is_master = is_master
self.topic = "bucket_metadata"
if self.is_master:
self._start_zmq_server()
def prepare(self) -> MasterMetadata:
# For master process, use cupy instead of torch to avoid memory register error
# when `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`.
if self.is_master:
self.send_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
self.recv_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
else:
self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="cuda")
self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="cuda")
return MasterMetadata(zmq_ip=self.ip, zmq_port=self.listen_port) if self.is_master else None
def finalize(self):
"""Destroy the NCCL process group if rebuild_group is True."""
if self.rebuild_group:
if self.rank >= 0:
collective.destroy_collective_group(self.group_name)
self.rank = None
self.world_size = None
self.send_buf = None
self.recv_buf = None
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"master_metadata": [metadata[0]] * trainer_world_size,
}
rollout_kwargs = {
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"master_metadata": [metadata[0]] * rollout_world_size,
}
return trainer_kwargs, rollout_kwargs
def _start_zmq_server(self):
self.ip = ray.util.get_node_ip_address().strip("[]")
self.listen_port, self.listen_sock = get_free_port(self.ip)
context = zmq.Context()
self.socket = context.socket(zmq.PUB)
if is_valid_ipv6_address(self.ip):
address = f"tcp://[{self.ip}]:{self.listen_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{self.ip}:{self.listen_port}"
self.socket.bind(address)
def _connect_zmq_client(self, metadata: MasterMetadata):
assert not self.is_master, "Master process should not connect to other processes."
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
if is_valid_ipv6_address(metadata.zmq_ip):
address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
self.socket.connect(address)
self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
def init_process_group(self, rank: int, world_size: int, master_metadata: MasterMetadata):
"""Initialize the NCCL process group.
Args:
rank (int): The rank of the current process.
world_size (int): The total number of processes.
"""
# For trainer workers other than rank 0, their rank should be -1.
if rank < 0:
self.rank = rank
self.world_size = world_size
return
if self.rebuild_group or not collective.is_group_initialized(self.group_name):
collective.init_collective_group(world_size, rank, "nccl", self.group_name)
self.rank = rank
self.world_size = world_size
else:
assert self.rank == rank, f"rank {rank} is not equal to self.rank {self.rank}"
assert self.world_size == world_size, (
f"world_size {world_size} is not equal to self.world_size {self.world_size}"
)
if self.rank > 0:
self._connect_zmq_client(master_metadata)
collective.barrier(self.group_name)
logger.info(f"init_process_group rank: {self.rank}, world_size: {self.world_size}")
@torch.no_grad()
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
assert self.rank <= 0, "Only trainer workers (rank <= 0) may call send_weights."
# For trainer ranks other than 0, consume weights without sending.
if self.rank < 0:
for name, weight in weights:
pass
return
send_buf, recv_buf = self.send_buf, self.recv_buf
broadcast_op = None
start_time = time.time()
bucket_meta: dict[str, TensorMeta] = {}
offset = 0
for name, weight in weights:
# fill the tensor bucket
if offset + weight.nbytes > self.bucket_size:
torch.cuda.synchronize()
# wait previous broadcast op finish
if broadcast_op is not None:
await broadcast_op.wait_for_complete()
broadcast_op = BroadcastOperation(
rank=self.rank,
group_name=self.group_name,
bucket=send_buf,
metadata={"bucket_meta": bucket_meta, "is_last": False},
socket=self.socket,
topic=self.topic,
)
# swap send_buf and recv_buf
send_buf, recv_buf = recv_buf, send_buf
bucket_meta = {}
offset = 0
assert offset + weight.nbytes <= self.bucket_size, (
f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
)
bucket_meta[name] = {
"name": name,
"shape": weight.shape,
"dtype": weight.dtype,
"offset": offset,
}
send_buf[offset : offset + weight.nbytes] = cp.asarray(weight.view(-1).view(torch.uint8))
offset += weight.nbytes
# broadcast last bucket
torch.cuda.synchronize()
if broadcast_op is not None:
await broadcast_op.wait_for_complete()
broadcast_op = BroadcastOperation(
rank=self.rank,
group_name=self.group_name,
bucket=send_buf,
metadata={"bucket_meta": bucket_meta, "is_last": True},
socket=self.socket,
topic=self.topic,
)
await broadcast_op.wait_for_complete()
logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
@torch.no_grad()
async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
assert self.rank > 0, "Rank 0 should not receive weights."
send_buf, recv_buf = self.send_buf, self.recv_buf
total_bytes, total_params = 0, 0
# receive first bucket
start_time = time.time()
broadcast_op = BroadcastOperation(
rank=self.rank,
group_name=self.group_name,
bucket=recv_buf,
metadata=None,
socket=self.socket,
topic=self.topic,
)
metadata = await broadcast_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# swap send_buf and recv_buf
send_buf, recv_buf = recv_buf, send_buf
while not metadata["is_last"]:
# 1. receive next bucket
broadcast_op = BroadcastOperation(
rank=self.rank,
group_name=self.group_name,
bucket=recv_buf,
metadata=None,
socket=self.socket,
topic=self.topic,
)
# 2. yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# 3. wait for next bucket broadcast finish
metadata = await broadcast_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# 4. swap send_buf and recv_buf
torch.cuda.synchronize() # sync non-blocking copy
send_buf, recv_buf = recv_buf, send_buf
# yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
time_cost = time.time() - start_time
bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
logger.info(
f"Rank {self.rank} receive weights done, total_params: {total_params}, "
f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
)
|
verl__checkpoint_engine__nccl_checkpoint_engine.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
import uuid
from collections import defaultdict, deque
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
from unittest.mock import patch
with patch("importlib.metadata.distributions", return_value=[]):
import cupy as cp
import nixl._api as nixl_api
import nixl._bindings as nixl_bindings
import ray
import torch
import zmq
import zmq.asyncio
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class NixlAgentMetadata:
agent_name: str
agent_metadata: bytes
zmq_ip: str
zmq_port: int
class NixlAgent:
"""This is a wrapper class for nixl_agent, the main purpose is to use ZeroMQ instead of
`nixl_agent.send_notif` to send bucket tensor metadata.
"""
def __init__(self):
self.agent_name = str(uuid.uuid4())
self.agent = nixl_api.nixl_agent(self.agent_name)
self.notifications: dict[str, deque[bytes]] = defaultdict(deque)
self.start_zmq_server()
self.zmq_clients: dict[str, zmq.Socket] = {}
self.messages: dict[str, deque[bytes]] = defaultdict(deque)
def __getattr__(self, name):
attr = getattr(self.agent, name)
if callable(attr):
def wrapper(*args, **kwargs):
return attr(*args, **kwargs)
return wrapper
else:
return attr
def get_agent_metadata(self) -> NixlAgentMetadata:
return NixlAgentMetadata(
agent_name=self.agent_name,
agent_metadata=self.agent.get_agent_metadata(),
zmq_ip=self.ip,
zmq_port=self.listen_port,
)
def start_zmq_server(self):
self.ip = ray.util.get_node_ip_address().strip("[]")
self.listen_port, self.listen_sock = get_free_port(self.ip)
context = zmq.asyncio.Context()
self.socket = context.socket(zmq.PULL)
if is_valid_ipv6_address(self.ip):
address = f"tcp://[{self.ip}]:{self.listen_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{self.ip}:{self.listen_port}"
self.socket.bind(address)
def add_remote_agent(self, metadata: NixlAgentMetadata) -> str:
agent_name = self.agent.add_remote_agent(metadata.agent_metadata).decode("utf-8")
assert agent_name == metadata.agent_name, f"Agent name {agent_name} not equal to {metadata.agent_name}"
context = zmq.Context()
socket = context.socket(zmq.PUSH)
if is_valid_ipv6_address(metadata.zmq_ip):
address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
socket.connect(address)
self.zmq_clients[agent_name] = socket
return agent_name
def remove_remote_agent(self, agent_name: str):
self.agent.remove_remote_agent(agent_name)
socket = self.zmq_clients.pop(agent_name)
socket.close()
def send_message(self, agent_name, message: dict):
socket = self.zmq_clients[agent_name]
socket.send_pyobj((self.agent_name, message), zmq.DONTWAIT)
async def read_message(self, agent_name: str) -> dict:
while len(self.messages[agent_name]) == 0:
recv_agent_name, message = await self.socket.recv_pyobj()
self.messages[recv_agent_name].append(message)
return self.messages[agent_name].popleft()
async def get_notification(self, remote_name: str) -> bytes:
while len(self.notifications[remote_name]) == 0:
notifs = self.agent.get_new_notifs()
for notif_agent_name, notif in notifs.items():  # avoid shadowing the requested remote_name
self.notifications[notif_agent_name].extend(notif)
await asyncio.sleep(0)
return self.notifications[remote_name].popleft()
class ReadableOperation:
"""Encapsulates a readable operation to remote agent.
1. send metadata to remote agent
2. wait until remote agent read complete.
Args:
agent (NixlAgent): The Nixl agent.
remote_agent (str): The name of the remote agent.
local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
metadata (dict): Metadata for the read operation.
"""
def __init__(
self,
agent: NixlAgent,
remote_agent: str,
local_descs: nixl_bindings.nixlXferDList,
metadata: dict,
):
self.agent = agent
self.remote_agent = remote_agent
self.local_descs = local_descs
self.notify_key = uuid.uuid4().bytes
message = {"notify_key": self.notify_key, "remote_descs": self.local_descs, **metadata}
self.agent.send_message(self.remote_agent, message)
async def wait_for_complete(self):
"""Block until remote agent read complete."""
notification = await self.agent.get_notification(self.remote_agent)
assert self.notify_key == notification, f"Notify key {self.notify_key} not equal to {notification}"
logger.debug(f"ReadableOperation to {self.remote_agent} complete")
class ReadOperation:
"""Encapsulates a read operation from remote agent.
1. read medata from remote agent
2. start read transfer operation
3. wait until read complete
Args:
agent (NixlAgent): The Nixl agent.
remote_agent (str): The name of the remote agent.
local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
bucket_size (int): The size of the bucket in bytes.
"""
def __init__(self, agent: NixlAgent, remote_agent: str, local_descs: nixl_bindings.nixlXferDList, bucket_size: int):
self.agent = agent
self.remote_agent = remote_agent
self.local_descs = local_descs
self.remote_descs = None
self.xfer_handle = None
self.notify_key = None
self.bucket_size = bucket_size
self.start_time = None
async def read_metadata(self) -> dict:
"""Block until the remote agent sends the metadata.
Returns:
dict: Metadata from the remote agent.
"""
metadata = await self.agent.read_message(self.remote_agent)
self.remote_descs = metadata.pop("remote_descs")
self.notify_key = metadata.pop("notify_key")
return metadata
def begin_read(self):
"""Start the read operation."""
assert self.remote_descs is not None and self.notify_key is not None
self.xfer_handle = self.agent.initialize_xfer(
"READ", self.local_descs, self.remote_descs, self.remote_agent, self.notify_key
)
state = self.agent.transfer(self.xfer_handle)
assert state != "ERR", f"Read from {self.remote_agent} got to {state} state."
self.start_time = time.time()
async def wait_for_complete(self):
"""Block until the read operation complete."""
while True:
state = self.agent.check_xfer_state(self.xfer_handle)
if state == "ERR":
logger.error(f"Read from {self.remote_agent} got to {state} state.")
exit(-1)
elif state == "DONE":
break
else:
await asyncio.sleep(0)
self.agent.release_xfer_handle(self.xfer_handle)
end_time = time.time()
bandwidth = self.bucket_size / (end_time - self.start_time) / (1024 * 1024 * 1024)
logger.debug(f"ReadOperation read data from {self.remote_agent} complete, bandwidth: {bandwidth:.2f} GB/s")
@CheckpointEngineRegistry.register("nixl")
class NIXLCheckpointEngine(CheckpointEngine):
"""NIXL checkpoint engine with p2p communication, support various backends: ucx, uccl, mooncacke, etc.
For UCX backend, some environment variables need to be set: UCX_TLS, UCX_IB_GID_INDEX, UCX_IB_DEVICES, etc.
Please refer to: https://openucx.readthedocs.io/en/master/faq.html
Args:
bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
two buffers to send and receive weights at the same time, so the device memory overhead is 2 * bucket_size.
device (str): The device to use for the checkpoint engine, "cpu" or "cuda".
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
def __init__(
self,
bucket_size: int,
device: str = "cuda",
rollout_dtype: torch.dtype = torch.bfloat16,
is_master: bool = False,
):
self.bucket_size = bucket_size
self.device = device
self.rollout_dtype = rollout_dtype
self.agent = NixlAgent()
self.is_master = is_master
def prepare(self) -> NixlAgentMetadata:
"""Prepare send and recv bucket.
Returns:
NixlAgentMetadata: The metadata of the current nixl agent.
"""
# For CUDA buffers, allocate with cupy instead of torch to avoid memory registration errors
# when `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`.
if self.device == "cuda":
send_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
recv_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
self.send_buf = torch.as_tensor(send_buf, dtype=torch.uint8)
self.recv_buf = torch.as_tensor(recv_buf, dtype=torch.uint8)
else:
self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
self.send_reg_descs = self.agent.register_memory(self.send_buf)
self.recv_reg_descs = self.agent.register_memory(self.recv_buf)
self.send_descs = self.agent.get_xfer_descs(self.send_buf)
self.recv_descs = self.agent.get_xfer_descs(self.recv_buf)
return self.agent.get_agent_metadata()
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"method": ["init_process_group"] * trainer_world_size,
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"prev_agent_metadata": [None] * trainer_world_size,
"next_agent_metadata": [metadata[-rollout_world_size]] + [None] * (trainer_world_size - 1),
}
rollout_kwargs = {
"method": ["init_process_group"] * rollout_world_size,
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"prev_agent_metadata": [metadata[0]] + metadata[-rollout_world_size:-1],
"next_agent_metadata": metadata[-rollout_world_size + 1 :] + [None],
}
return trainer_kwargs, rollout_kwargs
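# Topology sketch (illustrative, not part of the original file): unlike the NCCL/HCCL star,
# NIXL arranges trainer rank 0 and the rollout workers in a chain, so each worker reads the
# bucket from its predecessor and exposes it to its successor:
#
#   trainer rank 0 -> rollout rank 1 -> rollout rank 2 -> ... -> rollout rank N
#
# For example, with trainer_world_size=2, rollout_world_size=3 and metadata=[t0, t1, r1, r2, r3]:
#   trainer: rank [0, -1], next_agent_metadata [r1, None]
#   rollout: rank [1, 2, 3], prev_agent_metadata [t0, r1, r2], next_agent_metadata [r2, r3, None]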
def init_process_group(
self, rank: int, world_size: int, prev_agent_metadata: NixlAgentMetadata, next_agent_metadata: NixlAgentMetadata
):
"""Setup the communication with the previous and next agent.
Args:
rank (int): The rank of the current process.
world_size (int): The total number of processes.
prev_agent_metadata (NixlAgentMetadata): The metadata of the previous nixl agent.
next_agent_metadata (NixlAgentMetadata): The metadata of the next nixl agent.
"""
if rank < 0:
assert not prev_agent_metadata and not next_agent_metadata, (
f"rank {rank} should not have prev_agent_metadata or next_agent_metadata"
)
elif rank == 0:
assert not prev_agent_metadata and next_agent_metadata, f"rank {rank} should have next_agent_metadata"
elif 0 < rank < world_size - 1:
assert prev_agent_metadata and next_agent_metadata, (
f"rank {rank} should have prev_agent_metadata and next_agent_metadata"
)
elif rank == world_size - 1:
assert prev_agent_metadata and not next_agent_metadata, (
f"rank {rank} should have prev_agent_metadata and not next_agent_metadata"
)
self.rank = rank
self.world_size = world_size
self.prev_agent = None
self.next_agent = None
if prev_agent_metadata is not None:
self.prev_agent = self.agent.add_remote_agent(prev_agent_metadata)
if next_agent_metadata is not None:
self.next_agent = self.agent.add_remote_agent(next_agent_metadata)
logger.info(
f"init_process_group rank: {self.rank}, world_size: {self.world_size}, "
f"prev_agent: {self.prev_agent}, next_agent: {self.next_agent}"
)
def finalize(self):
"""Cleanup communication with the previous and next agent, and deregister the memory."""
if self.prev_agent:
self.agent.remove_remote_agent(self.prev_agent)
if self.next_agent:
self.agent.remove_remote_agent(self.next_agent)
self.agent.deregister_memory(self.send_reg_descs)
self.agent.deregister_memory(self.recv_reg_descs)
self.send_buf = None
self.recv_buf = None
self.send_reg_descs = None
self.recv_reg_descs = None
self.send_descs = None
self.recv_descs = None
self.rank = None
self.world_size = None
self.prev_agent = None
self.next_agent = None
@torch.no_grad()
async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
"""Send the weights of the model.
Args:
weights: A generator that yields the name of the weight tensor and the tensor itself.
"""
assert self.rank <= 0, "Only trainer workers (rank <= 0) may call send_weights."
# For trainer workers other than rank 0, just consume weights and do nothing.
if self.rank < 0:
for name, weight in weights:
pass
return
assert self.next_agent is not None, "Next agent is not set."
send_buf, recv_buf = self.send_buf, self.recv_buf
send_descs, recv_descs = self.send_descs, self.recv_descs
readable_op = None
start_time = time.time()
bucket_meta: dict[str, TensorMeta] = {}
offset = 0
for name, weight in weights:
# fill the tensor bucket
if offset + weight.nbytes > self.bucket_size:
torch.cuda.synchronize()
# wait previous bucket to be received
if readable_op is not None:
await readable_op.wait_for_complete()
# send bucket meta to next agent
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
{"bucket_meta": bucket_meta, "is_last": False},
)
# swap send and recv buf
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
bucket_meta = {}
offset = 0
assert offset + weight.nbytes <= self.bucket_size, (
f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
)
bucket_meta[name] = {
"name": name,
"shape": weight.shape,
"dtype": weight.dtype,
"offset": offset,
}
send_buf[offset : offset + weight.nbytes].copy_(weight.view(-1).view(torch.uint8), non_blocking=True)
offset += weight.nbytes
# send last bucket meta to next agent
torch.cuda.synchronize()
if readable_op is not None:
await readable_op.wait_for_complete()
readable_op = ReadableOperation(
self.agent, self.next_agent, send_descs, {"bucket_meta": bucket_meta, "is_last": True}
)
await readable_op.wait_for_complete()
logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
@torch.no_grad()
async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
"""Receive the weights of the model.
Yields:
A tuple of the name of the weight tensor and the tensor itself.
"""
assert self.prev_agent is not None, "Previous agent is not set."
send_buf, recv_buf = self.send_buf, self.recv_buf
send_descs, recv_descs = self.send_descs, self.recv_descs
total_bytes, total_params = 0, 0
# receive first bucket from previous agent
start_time = time.time()
read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
metadata = await read_op.read_metadata()
read_op.begin_read()
await read_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(metadata["bucket_meta"])
# swap send and recv buf
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
while not metadata["is_last"]:
# 1. send bucket to next agent
readable_op = None
if self.next_agent is not None:
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
metadata,
)
# 2. receive bucket from previous agent
read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
next_metadata = await read_op.read_metadata()
read_op.begin_read()
# 3. yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# 4. wait for next agent read complete and read from previous agent complete
if readable_op is not None:
await readable_op.wait_for_complete()
await read_op.wait_for_complete()
total_bytes += self.bucket_size
total_params += len(next_metadata["bucket_meta"])
# 5. swap send and recv buf
torch.cuda.synchronize() # sync non-blocking copy
metadata = next_metadata
send_buf, recv_buf = recv_buf, send_buf
send_descs, recv_descs = recv_descs, send_descs
# send last bucket to next agent
readable_op = None
if self.next_agent is not None:
readable_op = ReadableOperation(
self.agent,
self.next_agent,
send_descs,
metadata,
)
# yield tensor from send_buf
for name, meta in metadata["bucket_meta"].items():
dtype, shape = meta["dtype"], meta["shape"]
size = dtype.itemsize * shape.numel()
tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
yield name, tensor
# wait for next agent read complete
if readable_op is not None:
await readable_op.wait_for_complete()
time_cost = time.time() - start_time
bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
logger.info(
f"Rank {self.rank} receive weights done, total_params: {total_params}, "
f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
)
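# Relay note (illustrative, not part of the original file): because of the chain topology,
# receive_weights both consumes and forwards. While bucket k is yielded to the local consumer
# from send_buf, bucket k+1 is read from the previous agent into recv_buf and bucket k is
# simultaneously exposed to the next agent, so weights stream down the chain with only
# 2 * bucket_size of staging memory per worker.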
|
verl__checkpoint_engine__nixl_checkpoint_engine.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import heapq
import logging
import os
import random
from abc import ABC, abstractmethod
from typing import Any, Optional
from uuid import uuid4
import hydra
import numpy as np
import ray
import torch
from cachetools import LRUCache
from omegaconf import DictConfig, OmegaConf
from PIL import Image
from pydantic import BaseModel, ConfigDict
from tensordict import TensorDict
from transformers import AutoProcessor, AutoTokenizer
from verl.experimental.agent_loop.prometheus_utils import update_prometheus_config
from verl.experimental.agent_loop.utils import resolve_config_path
from verl.protocol import DataProto
from verl.single_controller.ray.base import RayResourcePool, RayWorkerGroup
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.chat_template import initialize_system_prompt
from verl.utils.dataset.rl_dataset import RLHFDataset, get_dataset_class
from verl.utils.fs import copy_to_local
from verl.utils.model import compute_position_id_with_mask
from verl.utils.ray_utils import get_event_loop
from verl.utils.rollout_trace import (
RolloutTraceConfig,
rollout_trace_attr,
rollout_trace_op,
)
from verl.utils.transferqueue_utils import tqbridge
from verl.workers.rollout.replica import TokenOutput, get_rollout_replica_class
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class AsyncLLMServerManager:
"""
A class to manage multiple OpenAI compatible LLM servers. This class provides
- Load balancing: least-requests load balancing
- Sticky session: send multi-turn chat completions to the same server for automatic prefix caching
"""
def __init__(self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], max_cache_size: int = 10000):
"""Initialize the AsyncLLMServerManager.
Args:
config (DictConfig): YAML config.
server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles.
max_cache_size (int, optional): max cache size for request_id to server mapping. Defaults to 10000.
"""
self.config = config
self.server_handles = server_handles
random.shuffle(self.server_handles)
# Least requests load balancing
self.weighted_serveres = [[0, idx, server] for idx, server in enumerate(self.server_handles)]
heapq.heapify(self.weighted_serveres)
# LRU cache to map request_id to server
self.request_id_to_server = LRUCache(maxsize=max_cache_size)
def _choose_server(self, request_id: str) -> ray.actor.ActorHandle:
# TODO: implement server pressure awareness load balancing
if request_id in self.request_id_to_server:
return self.request_id_to_server[request_id]
_, _, server = self.weighted_serveres[0]
self.weighted_serveres[0][0] += 1
heapq.heapreplace(self.weighted_serveres, self.weighted_serveres[0])
self.request_id_to_server[request_id] = server
return server
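# Load-balancing note (illustrative, not part of the original file): weighted_serveres is a
# min-heap of [in_flight_count, index, server] entries, so the least-loaded server sits at
# index 0. Incrementing its counter and calling heapq.heapreplace re-inserts that entry with
# the new weight, keeping the heap ordered for the next request.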
@rollout_trace_op
async def generate(
self,
request_id,
*,
prompt_ids: list[int],
sampling_params: dict[str, Any],
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
) -> TokenOutput:
"""Generate tokens from prompt ids.
Args:
request_id (str): request id for sticky session.
prompt_ids (List[int]): List of prompt token ids.
sampling_params (Dict[str, Any]): Sampling parameters for the chat completion.
image_data (Optional[list[Any]]): Optional image inputs for multi-modal generation.
video_data (Optional[list[Any]]): Optional video inputs for multi-modal generation.
Returns:
TokenOutput: token output
"""
server = self._choose_server(request_id)
output = await server.generate.remote(
request_id=uuid4().hex, # use new request_id for each turn
prompt_ids=prompt_ids,
sampling_params=sampling_params,
image_data=image_data,
video_data=video_data,
)
return output
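# Illustrative usage (not part of the original file): multi-turn rollouts reuse one
# request_id so _choose_server keeps routing the conversation to the same server and the
# engine's automatic prefix caching can reuse earlier turns. `first_turn_ids`, `next_turn_ids`
# and `sp` are hypothetical values built by the agent loop:
#
#   request_id = uuid4().hex
#   out1 = await manager.generate(request_id, prompt_ids=first_turn_ids, sampling_params=sp)
#   out2 = await manager.generate(request_id, prompt_ids=next_turn_ids, sampling_params=sp)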
class AgentLoopMetrics(BaseModel):
"""Agent loop performance metrics."""
generate_sequences: float = 0.0
tool_calls: float = 0.0
num_preempted: int = -1 # -1 means not available
class AgentLoopOutput(BaseModel):
"""Agent loop output."""
prompt_ids: list[int]
"""Prompt token ids."""
response_ids: list[int]
"""Response token ids including LLM generated token, tool response token."""
response_mask: list[int]
"""Response mask, 1 for LLM generated token, 0 for tool response token."""
response_logprobs: Optional[list[float]] = None
"""Log probabilities for the response tokens."""
routed_experts: Optional[Any] = None
"""Routed experts for the total tokens."""
multi_modal_data: Optional[dict[str, Any]] = None
"""Multi-modal data for multi-modal tools."""
reward_score: Optional[float] = None
"""Reward score for the trajectory."""
num_turns: int = 0
"""Number of chat turns, including user, assistant, tool."""
metrics: AgentLoopMetrics
"""Auxiliary performance metrics"""
extra_fields: dict[str, Any] = {}
"""Extra fields for dynamic addition."""
class _InternalAgentLoopOutput(AgentLoopOutput):
"""Internal agent loop output with padded sequences."""
model_config = ConfigDict(arbitrary_types_allowed=True)
prompt_ids: torch.Tensor
"""Padded prompt token ids."""
response_ids: torch.Tensor
"""Padded response token ids."""
input_ids: torch.Tensor
"""Padded input ids(prompt_ids + response_ids)."""
position_ids: torch.Tensor
"""Padded position ids."""
response_mask: torch.Tensor
"""Padded response mask."""
attention_mask: torch.Tensor
"""Padded attention mask."""
response_logprobs: Optional[torch.Tensor] = None
"""Padded log probabilities for the response tokens."""
routed_experts: Optional[torch.Tensor] = None
"""Padded routed experts for the total tokens."""
multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None
"""Multi-modal inputs for processors (e.g., pixel_values, image_grid_thw)."""
extra_fields: dict[str, Any] = {}
"""Extra fields for dynamic addition."""
class DictConfigWrap:
"""Wrapper for DictConfig to avoid hydra.utils.instantiate recursive resolve."""
def __init__(self, config: DictConfig):
self.config = config
class AgentLoopBase(ABC):
"""An agent loop takes an input message, chat with OpenAI compatible LLM server and interact with various
environments."""
def __init__(
self,
trainer_config: DictConfigWrap,
server_manager: AsyncLLMServerManager,
tokenizer: AutoTokenizer,
processor: AutoProcessor,
dataset_cls: type[RLHFDataset],
dataset_config: DictConfigWrap,
**kwargs,
):
"""Initialize agent loop, each sample will have its own loop instance.
Args:
trainer_config (DictConfigWrap): trainer config.
server_manager (AsyncLLMServerManager): OpenAI compatible LLM server manager.
tokenizer (AutoTokenizer): Tokenizer for tokenizing messages.
processor (AutoProcessor): Processor for processing multi-modal messages.
dataset_cls (type[RLHFDataset]): Dataset class used to create the dataset. Defaults to RLHFDataset.
dataset_config (DictConfigWrap): Dataset config.
"""
self.config = trainer_config.config
self.server_manager = server_manager
self.tokenizer = tokenizer
self.processor = processor
self.dataset_cls = dataset_cls
self.dataset_config = dataset_config.config
self.apply_chat_template_kwargs = self.dataset_config.get("apply_chat_template_kwargs", {})
self.system_prompt = initialize_system_prompt(self.tokenizer, **self.apply_chat_template_kwargs)
self.loop = get_event_loop()
async def process_vision_info(self, messages: list[dict]) -> dict:
"""Extract images and videos from messages.
Args:
messages (list[dict]): Input messages.
Returns:
dict: Multi-modal data with keys "images" and "videos".
"""
multi_modal_data = {}
if self.processor is not None:
images, videos = await self.dataset_cls.process_vision_info(
messages, image_patch_size=self.processor.image_processor.patch_size, config=self.dataset_config
)
if images is not None:
multi_modal_data["images"] = images
if videos is not None:
multi_modal_data["videos"] = videos
return multi_modal_data
async def apply_chat_template(
self,
messages: list[dict],
tools: list[dict] = None,
images: list[Image.Image] = None,
videos: list[tuple[torch.Tensor, dict]] = None,
remove_system_prompt: bool = False,
):
"""Apply chat template to messages with optional tools, images, and videos.
Args:
messages (list[dict]): Input messages.
tools (list[dict], optional): Tools schemas. Defaults to None.
images (list[Image.Image], optional): Input images. Defaults to None.
videos (list[tuple[torch.Tensor, dict]], optional): Input videos. Defaults to None.
remove_system_prompt (bool, optional): Whether to remove system prompt. Defaults to False.
Returns:
list[int]: Prompt token ids.
"""
if self.processor is not None:
raw_prompt = await self.loop.run_in_executor(
None,
lambda: self.processor.apply_chat_template(
messages,
tools=tools,
add_generation_prompt=True,
tokenize=False,
**self.apply_chat_template_kwargs,
),
)
# split the videos and their corresponding metadata
if videos is not None:
videos, video_metadatas = zip(*videos, strict=False)
videos, video_metadatas = list(videos), list(video_metadatas)
else:
video_metadatas = None
model_inputs = self.processor(
text=[raw_prompt],
images=images,
videos=videos,
video_metadata=video_metadatas,
return_tensors="pt",
do_sample_frames=False,
)
prompt_ids = model_inputs.pop("input_ids").squeeze(0).tolist()
else:
prompt_ids = await self.loop.run_in_executor(
None,
lambda: self.tokenizer.apply_chat_template(
messages,
tools=tools,
add_generation_prompt=True,
tokenize=True,
**self.apply_chat_template_kwargs,
),
)
if remove_system_prompt:
prompt_ids = prompt_ids[len(self.system_prompt) :]
return prompt_ids
@abstractmethod
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
"""Run agent loop to interact with LLM server and environment.
Args:
sampling_params (Dict[str, Any]): LLM sampling params.
**kwargs: dataset fields from `verl.utils.dataset.RLHFDataset`.
Returns:
AgentLoopOutput: Agent loop output.
"""
raise NotImplementedError
"""Agent loop registry: key is agent_name, value is a dict of agent loop config
used by hydra.utils.instantiate to initialize agent loop instance.
https://hydra.cc/docs/advanced/instantiate_objects/overview/
"""
_agent_loop_registry: dict[str, dict] = {}
def register(agent_name: str):
"""Register agent loop class."""
def decorator(subclass: type[AgentLoopBase]) -> type[AgentLoopBase]:
fqdn = f"{subclass.__module__}.{subclass.__qualname__}"
_agent_loop_registry[agent_name] = {"_target_": fqdn}
return subclass
return decorator
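# Hedged usage sketch (not part of the original file): registering a custom agent loop so it
# can be selected via the "agent_name" field of a dataset sample. `MyAgentLoop` and its body
# are hypothetical:
#
#   @register("my_agent")
#   class MyAgentLoop(AgentLoopBase):
#       async def run(self, sampling_params, **kwargs):
#           prompt_ids = await self.apply_chat_template(kwargs["raw_prompt"])
#           # ... call self.server_manager.generate and build response_ids / response_mask ...
#           return AgentLoopOutput(prompt_ids=prompt_ids, response_ids=response_ids,
#                                  response_mask=response_mask, num_turns=2,
#                                  metrics=AgentLoopMetrics())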
class AgentLoopWorker:
"""Agent loop worker takes a batch of messages and run each message in an agent loop."""
def __init__(
self,
config: DictConfig,
server_handles: list[ray.actor.ActorHandle],
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
"""Initialize agent loop manager.
Args:
config (DictConfig): YAML config.
server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles.
reward_loop_worker_handles (List[ray.actor.ActorHandle]): Actor handles for streaming reward computation.
"""
self.config = config
# allow recipes to provide their own server manager
if not hasattr(self, "server_manager"):
self.server_manager = AsyncLLMServerManager(config, server_handles)
self.dataset_cls = get_dataset_class(config.data)
self.reward_loop_worker_handles = reward_loop_worker_handles
model_path = config.actor_rollout_ref.model.path
self.model_name = "/".join(model_path.split("/")[-2:])
local_path = copy_to_local(config.actor_rollout_ref.model.path)
self.tokenizer = hf_tokenizer(local_path, trust_remote_code=True)
self.processor = hf_processor(local_path, trust_remote_code=True)
agent_loop_config_path = config.actor_rollout_ref.rollout.agent.agent_loop_config_path
if agent_loop_config_path:
resolved_path = resolve_config_path(agent_loop_config_path)
agent_loop_configs = OmegaConf.load(resolved_path)
for agent_loop_config in agent_loop_configs:
_agent_loop_registry[agent_loop_config.name] = agent_loop_config
if self.config.actor_rollout_ref.model.get("custom_chat_template", None) is not None:
if self.processor is not None:
self.processor.chat_template = self.config.actor_rollout_ref.model.custom_chat_template
self.tokenizer.chat_template = self.config.actor_rollout_ref.model.custom_chat_template
trace_config = self.config.actor_rollout_ref.rollout.get("trace", {})
RolloutTraceConfig.init(
self.config.trainer.project_name,
self.config.trainer.experiment_name,
trace_config.get("backend"),
trace_config.get("token2text", False),
trace_config.get("max_samples_per_step_per_worker", None),
)
@tqbridge()
async def generate_sequences(self, batch: DataProto) -> DataProto:
"""Generate sequences from agent loop.
Args:
batch (DataProto): Input batch.
Returns:
DataProto: Output batch.
- prompts: [bsz, prompt_length], prompt token ids from dataset.
- responses: [bsz, response_length], output token ids include response tokens
from LLM generation and observation tokens from tool_calls.
- response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens.
- input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens
and response tokens.
- attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens.
- position_ids: [bsz, prompt_length + response_length], incremental position ids.
For multi-turn conversations:
responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->|
response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0|
"""
config = self.config.actor_rollout_ref.rollout
sampling_params = dict(
temperature=config.temperature,
top_p=config.top_p,
top_k=config.top_k,
repetition_penalty=1.0,
logprobs=config.calculate_log_probs,
)
# override sampling params for validation
if batch.meta_info.get("validate", False):
sampling_params["top_p"] = config.val_kwargs.top_p
sampling_params["top_k"] = config.val_kwargs.top_k
sampling_params["temperature"] = config.val_kwargs.temperature
# by default, we assume it's a single turn agent
if "agent_name" not in batch.non_tensor_batch:
default_agent_loop = config.agent.default_agent_loop
batch.non_tensor_batch["agent_name"] = np.array([default_agent_loop] * len(batch), dtype=object)
if "index" in batch.non_tensor_batch:
index = batch.non_tensor_batch["index"]
else:
index = np.arange(len(batch))
max_samples_per_worker = RolloutTraceConfig.get_instance().max_samples_per_step_per_worker
# For n rollouts per sample, we trace all n rollouts for selected samples
# Note: This sampling happens per-worker, so total traces = max_samples_per_worker * num_workers * n
if max_samples_per_worker is not None:
unique_sample_indices = np.unique(index)
if max_samples_per_worker < len(unique_sample_indices):
selected_samples = set(
np.random.choice(unique_sample_indices, max_samples_per_worker, replace=False).tolist()
)
traced_indices = set(i for i in range(len(batch)) if index[i] in selected_samples)
else:
traced_indices = set(range(len(batch)))
else:
traced_indices = set(range(len(batch)))
trajectory_info = await get_trajectory_info(
batch.meta_info.get("global_steps", -1), index.tolist(), batch.meta_info.get("validate", False)
)
tasks = []
for i in range(len(batch)):
trace_this_sample = i in traced_indices
kwargs = {k: v[i] for k, v in batch.non_tensor_batch.items()}
tasks.append(
asyncio.create_task(
self._run_agent_loop(sampling_params, trajectory_info[i], trace=trace_this_sample, **kwargs)
)
)
outputs = await asyncio.gather(*tasks)
output = self._postprocess(outputs, input_non_tensor_batch=batch.non_tensor_batch)
return output
async def _run_agent_loop(
self,
sampling_params: dict[str, Any],
trajectory: dict[str, Any],
*,
agent_name: str,
trace: bool = True,
**kwargs,
) -> _InternalAgentLoopOutput:
with rollout_trace_attr(
step=trajectory["step"],
sample_index=trajectory["sample_index"],
rollout_n=trajectory["rollout_n"],
validate=trajectory["validate"],
name="agent_loop",
trace=trace,
):
assert agent_name in _agent_loop_registry, (
f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}"
)
agent_loop_config = _agent_loop_registry[agent_name]
agent_loop = hydra.utils.instantiate(
config=agent_loop_config,
trainer_config=DictConfigWrap(config=self.config),
server_manager=self.server_manager,
tokenizer=self.tokenizer,
processor=self.processor,
dataset_cls=self.dataset_cls,
dataset_config=DictConfigWrap(self.config.data),
)
output: AgentLoopOutput = await agent_loop.run(sampling_params, **kwargs)
return await self._agent_loop_postprocess(output, **kwargs)
async def _agent_loop_postprocess(self, output, **kwargs) -> _InternalAgentLoopOutput:
"""Perform post-processing operations on the output of each individual agent loop."""
output.extra_fields["raw_prompt"] = kwargs["raw_prompt"]
# Some AgentLoop may have already computed the reward score, e.g., SWE-agent.
# NOTE: consistent with the legacy batch version of generate_sequences that existed in the
# deprecated vLLM SPMD rollout implementation.
# prompt_ids: left padded with zeros (e.g., [0,0,0,0,1,2,3,4])
# response_ids: right padded with zeros (e.g., [5,6,7,8,0,0,0,0])
# input_ids: concatenation of prompt + response
# Mask:
# For example, if the prompt is [1,2,3,4] and the response is [5,6,7,8,9,10,11,(tool start)12,13(tool end),14,15]
# - prompt_attention_mask: 0s for padding, 1s for tokens
# e.g., [0,0,0,0,1,1,1,1]
# - response_attention_mask: 0s for padding, 1s for tokens
# e.g., [1,1,1,1,1,1,1,1,1,1,1,0,0,0,0]
# attention_mask: concatenation of prompt_attention_mask and response_attention_mask
# e.g., [0,0,0,0,1,1,1,1(prompt),1,1,1,1,1,1,1,1,1,1,1,0,0,0,0(response)]
# - response_mask: 1s for LLM generated tokens, 0 for tool response/padding tokens
# e.g., [1,1,1,1,1,1,1,(tool start),0,0(tool end),1,1,0,0,0,0]
# - position_ids: sequential positions for tokens, starting at 0
# e.g., [0,0,0,0,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,0,0,0]
# TODO(wuxibin): remove padding and use tensordict.
self.tokenizer.padding_side = "left"
prompt_output = self.tokenizer.pad(
{"input_ids": output.prompt_ids},
padding="max_length",
max_length=self.config.actor_rollout_ref.rollout.prompt_length,
return_tensors="pt",
return_attention_mask=True,
)
if prompt_output["input_ids"].dim() == 1:
prompt_output["input_ids"] = prompt_output["input_ids"].unsqueeze(0)
prompt_output["attention_mask"] = prompt_output["attention_mask"].unsqueeze(0)
self.tokenizer.padding_side = "right"
response_output = self.tokenizer.pad(
{"input_ids": output.response_ids},
padding="max_length",
max_length=self.config.actor_rollout_ref.rollout.response_length,
return_tensors="pt",
return_attention_mask=True,
)
if response_output["input_ids"].dim() == 1:
response_output["input_ids"] = response_output["input_ids"].unsqueeze(0)
response_output["attention_mask"] = response_output["attention_mask"].unsqueeze(0)
response_mask_output = self.tokenizer.pad(
{"input_ids": output.response_mask},
padding="max_length",
max_length=self.config.actor_rollout_ref.rollout.response_length,
return_tensors="pt",
return_attention_mask=False,
)
if response_mask_output["input_ids"].dim() == 1:
response_mask_output["input_ids"] = response_mask_output["input_ids"].unsqueeze(0)
response_logprobs = None
if output.response_logprobs is not None:
pad_size = self.config.actor_rollout_ref.rollout.response_length - len(output.response_logprobs)
response_logprobs = torch.tensor(output.response_logprobs + [0.0] * pad_size).unsqueeze(0)
response_mask = response_mask_output["input_ids"] * response_output["attention_mask"]
attention_mask = torch.cat([prompt_output["attention_mask"], response_output["attention_mask"]], dim=1)
input_ids = torch.cat([prompt_output["input_ids"], response_output["input_ids"]], dim=1)
routed_experts = None
if output.routed_experts is not None:
total_length = input_ids.shape[1]
length, layer_num, topk_num = output.routed_experts.shape
if isinstance(output.routed_experts, np.ndarray):
experts_tensor = torch.from_numpy(output.routed_experts)
elif isinstance(output.routed_experts, torch.Tensor):
experts_tensor = output.routed_experts
else:
raise TypeError(f"Unsupported type for routed_experts: {type(output.routed_experts)}")
routed_experts = torch.zeros(1, total_length, layer_num, topk_num, dtype=experts_tensor.dtype)
# Calculate start position: left padding means original prompt starts at the end
start_pos = prompt_output["input_ids"].shape[1] - len(output.prompt_ids)
end_pos = min(start_pos + length, total_length)
# Add boundary checks for robustness
if start_pos < 0 or end_pos > total_length:
raise ValueError(
f"Invalid position range: start_pos={start_pos}, end_pos={end_pos}, total_length={total_length}"
)
routed_experts[:, start_pos:end_pos] = experts_tensor.unsqueeze(0)
multi_modal_inputs = self._compute_multi_modal_inputs(output, input_ids)
position_ids = self._compute_position_ids(input_ids, attention_mask, multi_modal_inputs)
await self._compute_score(
output,
prompts=prompt_output["input_ids"],
responses=response_output["input_ids"],
attention_mask=attention_mask,
input_ids=input_ids,
position_ids=position_ids,
kwargs=kwargs,
)
return _InternalAgentLoopOutput(
prompt_ids=prompt_output["input_ids"],
response_ids=response_output["input_ids"],
input_ids=input_ids,
position_ids=position_ids,
response_mask=response_mask,
attention_mask=attention_mask,
response_logprobs=response_logprobs,
routed_experts=routed_experts,
multi_modal_inputs=multi_modal_inputs,
multi_modal_data=output.multi_modal_data,
reward_score=output.reward_score,
num_turns=output.num_turns,
metrics=output.metrics,
extra_fields=output.extra_fields,
)
def _compute_multi_modal_inputs(self, output, input_ids) -> dict[str, torch.Tensor]:
"""Compute multi-modal inputs with image and video."""
multi_modal_inputs = {}
if self.processor is None:
return multi_modal_inputs
images = output.multi_modal_data.get("images")
videos = output.multi_modal_data.get("videos")
# split the videos and their corresponding metadata
if videos is not None:
videos, video_metadatas = zip(*videos, strict=False)
videos, video_metadatas = list(videos), list(video_metadatas)
else:
video_metadatas = None
current_text = self.tokenizer.decode(input_ids.squeeze(0), skip_special_tokens=True)
multi_modal_inputs = self.processor(
text=[current_text],
images=images,
videos=videos,
video_metadata=video_metadatas,
return_tensors="pt",
do_sample_frames=False,
)
multi_modal_inputs.pop("input_ids", None)
multi_modal_inputs.pop("attention_mask", None)
# We must use dict(multi_modal_inputs) to convert BatchFeature values to a new dict
# because np.array() only keeps the keys for BatchFeature.
multi_modal_inputs = dict(multi_modal_inputs.convert_to_tensors("pt"))
image_grid_thw = multi_modal_inputs.get("image_grid_thw")
if image_grid_thw is not None:
images_seqlens = torch.repeat_interleave(image_grid_thw[:, 1] * image_grid_thw[:, 2], image_grid_thw[:, 0])
multi_modal_inputs["images_seqlens"] = images_seqlens
return multi_modal_inputs
def _compute_position_ids(self, input_ids, attention_mask, multi_modal_inputs) -> torch.Tensor:
"""Compute position ids for multi-modal inputs."""
if self.processor is None:
return compute_position_id_with_mask(attention_mask) # (1, seq_len)
image_grid_thw = multi_modal_inputs.get("image_grid_thw")
video_grid_thw = multi_modal_inputs.get("video_grid_thw")
# The model's get_rope_index has been dynamically bound to the processor.
vision_position_ids, _ = self.processor.get_rope_index(
input_ids=input_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
attention_mask=attention_mask,
)
vision_position_ids = vision_position_ids.transpose(0, 1) # (3, 1, seq_len) => (1, 3, seq_len)
valid_mask = attention_mask[0].bool()
text_position_ids = torch.ones((1, len(input_ids[0])), dtype=torch.long)
text_position_ids[0, valid_mask] = torch.arange(valid_mask.sum().item())
text_position_ids = text_position_ids.unsqueeze(0)
position_ids = torch.cat((text_position_ids, vision_position_ids), dim=1) # (1, 4, seq_length)
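# Illustrative shape walkthrough (assumed single sample, seq_len=23):
#   text_position_ids:   (1, 1, 23), row 0 enumerates valid (non-padding) tokens
#   vision_position_ids: (1, 3, 23), rows 1-3 carry t/h/w rope positions from get_rope_index
#   position_ids:        (1, 4, 23) after concatenation along dim=1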
return position_ids
async def _compute_score(self, output, prompts, responses, attention_mask, input_ids, position_ids, kwargs):
"""Compute reward score for single sample."""
enable_async_reward = self.reward_loop_worker_handles is not None
if output.reward_score is None and enable_async_reward:
batch = TensorDict(
{
"prompts": prompts, # [1, prompt_length]
"responses": responses, # [1, response_length]
"attention_mask": attention_mask, # [1, prompt_length + response_length]
"input_ids": input_ids, # [1, prompt_length + response_length]
"position_ids": position_ids,
},
batch_size=1,
)
non_tensor_batch = {
**{k: np.array([v]) for k, v in kwargs.items()},
"__num_turns__": np.array([output.num_turns]),
"tool_extra_fields": np.array([output.extra_fields], dtype=object),
}
data = DataProto(
batch=batch,
non_tensor_batch=non_tensor_batch,
)
selected_reward_loop_worker_handle = random.choice(self.reward_loop_worker_handles)
result = await selected_reward_loop_worker_handle.compute_score.remote(data)
output.reward_score = result["reward_score"]
output.extra_fields["reward_extra_info"] = result["reward_extra_info"]
def _postprocess(
self,
inputs: list[_InternalAgentLoopOutput],
input_non_tensor_batch: dict | None = None,
) -> DataProto:
"""Process the padded outputs from _run_agent_loop and combine them into a batch."""
# Convert lists back to tensors and stack them to create a batch.
prompt_ids = torch.cat([input.prompt_ids for input in inputs], dim=0)
response_ids = torch.cat([input.response_ids for input in inputs], dim=0)
response_mask = torch.cat([input.response_mask for input in inputs], dim=0)
attention_mask = torch.cat([input.attention_mask for input in inputs], dim=0)
input_ids = torch.cat([input.input_ids for input in inputs], dim=0)
position_ids = torch.cat([input.position_ids for input in inputs], dim=0)
optional_outputs = {}
if inputs[0].response_logprobs is not None:
optional_outputs["rollout_log_probs"] = torch.cat([input.response_logprobs for input in inputs], dim=0)
if inputs[0].routed_experts is not None:
optional_outputs["routed_experts"] = torch.cat([input.routed_experts for input in inputs], dim=0)
batch = TensorDict(
{
"prompts": prompt_ids, # [bsz, prompt_length]
"responses": response_ids, # [bsz, response_length]
"response_mask": response_mask, # [bsz, response_length]
"input_ids": input_ids, # [bsz, prompt_length + response_length]
"attention_mask": attention_mask, # [bsz, prompt_length + response_length]
# position_ids: [bsz, 3, prompt_length + response_length] or [bsz, prompt_length + response_length]
"position_ids": position_ids,
**optional_outputs,
},
batch_size=len(inputs),
)
scores = [input.reward_score for input in inputs]
if all(score is not None for score in scores):
prompt_length = prompt_ids.size(1)
response_length = attention_mask[:, prompt_length:].sum(dim=1) - 1
rm_scores = torch.zeros_like(response_mask, dtype=torch.float32)
rm_scores[torch.arange(response_mask.size(0)), response_length] = torch.tensor(scores, dtype=torch.float32)
batch["rm_scores"] = rm_scores
non_tensor_batch = {
"__num_turns__": np.array([input.num_turns for input in inputs], dtype=np.int32),
}
if self.reward_loop_worker_handles is None and input_non_tensor_batch:
non_tensor_batch.update(input_non_tensor_batch)
# add reward_extra_info to non_tensor_batch
reward_extra_infos = [input.extra_fields.get("reward_extra_info", {}) for input in inputs]
reward_extra_keys = list(reward_extra_infos[0].keys())
for key in reward_extra_keys:
non_tensor_batch[key] = np.array([info[key] for info in reward_extra_infos])
# Add multi_modal_inputs to non_tensor_batch if any samples have them
multi_modal_inputs_list = [input.multi_modal_inputs for input in inputs]
if any(mmi is not None for mmi in multi_modal_inputs_list):
non_tensor_batch["multi_modal_inputs"] = np.array(multi_modal_inputs_list, dtype=object)
metrics = [input.metrics.model_dump() for input in inputs]
# Collect extra fields from all inputs and convert them to np.ndarray
# Keep a stable set of keys so downstream batch concat stays consistent across agent loops.
extra_fields = {}
default_extra_keys = {
"turn_scores",
"tool_rewards",
"is_cancel",
"param_version_start",
"param_version_end",
"extras",
}
all_keys = set(key for input_item in inputs for key in input_item.extra_fields) | default_extra_keys
for key in all_keys:
temp_arr = np.empty(len(inputs), dtype=object)
temp_arr[:] = [input.extra_fields.get(key) for input in inputs]
extra_fields[key] = temp_arr
non_tensor_batch.update(extra_fields)
# Only include reward_extra_keys in meta_info if rm_scores is in batch
# This avoids conflicts when reward_tensor is merged later in ray_trainer.py
if "rm_scores" in batch.keys():
meta_info = {"metrics": metrics, "reward_extra_keys": reward_extra_keys}
else:
meta_info = {"metrics": metrics}
return DataProto(
batch=batch,
non_tensor_batch=non_tensor_batch,
meta_info=meta_info,
)
def create_transferqueue_client(
self,
):
"""Create a client for data system (TransferQueue)."""
from verl.single_controller.ray.base import get_random_string
from verl.utils.transferqueue_utils import create_transferqueue_client
client_name = get_random_string(length=6)
self.tq_client = create_transferqueue_client(
client_id=f"AgentLoopWorker_{client_name}",
config=self.config.transfer_queue,
)
async def get_trajectory_info(step, index, validate):
"""Get trajectory info.
Args:
step (int): global steps in the trainer.
index (list): from the datastore extra_info.index column.
validate (bool): whether is a validate step.
Returns:
list: trajectory.
"""
trajectory_info = []
rollout_n = 0
for i in range(len(index)):
if i > 0 and index[i - 1] == index[i]:
rollout_n += 1
else:
rollout_n = 0
trajectory_info.append({"step": step, "sample_index": index[i], "rollout_n": rollout_n, "validate": validate})
return trajectory_info
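# Illustrative example: for index = [7, 7, 7, 9] (repeated samples are contiguous),
# rollout_n is counted within each group, giving rollout_n = [0, 1, 2, 0].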
class AgentLoopManager:
"""Agent loop manager that manages a group of agent loop workers."""
def __init__(
self,
config: DictConfig,
worker_group: RayWorkerGroup = None,
rollout_resource_pool: RayResourcePool = None,
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
"""Initialize agent loop manager.
Args:
config (DictConfig): trainer config.
worker_group (RayWorkerGroup): ActorRolloutRef worker group for hybrid mode; None for standalone mode.
rollout_resource_pool (RayResourcePool): Resource pool for actor rollout (Colocate or Standalone mode).
reward_loop_worker_handles (List[ray.actor.ActorHandle]): Actor handles for streaming reward computation.
"""
self.config = config
self.worker_group = worker_group
self.reward_loop_worker_handles = reward_loop_worker_handles
# for recipe to change
if not hasattr(self, "rollout_replica_class"):
self.rollout_replica_class = get_rollout_replica_class(self.config.actor_rollout_ref.rollout.name)
if not hasattr(self, "agent_loop_workers_class"):
self.agent_loop_workers_class = ray.remote(AgentLoopWorker)
self._initialize_llm_servers(rollout_resource_pool)
self._init_agent_loop_workers()
def _initialize_llm_servers(self, rollout_resource_pool: RayResourcePool):
rollout_world_size = (
self.config.actor_rollout_ref.rollout.tensor_model_parallel_size
* self.config.actor_rollout_ref.rollout.data_parallel_size
* self.config.actor_rollout_ref.rollout.pipeline_model_parallel_size
)
world_size = (
self.worker_group.world_size
if self.worker_group
else self.config.trainer.n_gpus_per_node * self.config.trainer.nnodes
)
num_replicas = world_size // rollout_world_size
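# Illustrative example (assumed values): with tensor_model_parallel_size=2,
# data_parallel_size=2, pipeline_model_parallel_size=1 and a world size of 16 GPUs,
# rollout_world_size = 2 * 2 * 1 = 4 and num_replicas = 16 // 4 = 4 rollout replicas.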
rollout_config = self.config.actor_rollout_ref.rollout
model_config = self.config.actor_rollout_ref.model
self.rollout_replicas = [
self.rollout_replica_class(
replica_rank=replica_rank,
config=rollout_config,
model_config=model_config,
gpus_per_node=self.config.trainer.n_gpus_per_node,
)
for replica_rank in range(num_replicas)
]
if self.worker_group and rollout_config.name != "trtllm":
self._run_all([server.init_hybrid(self.worker_group) for server in self.rollout_replicas])
elif self.worker_group and rollout_config.name == "trtllm":
self._run_all(
[
server.init_hybrid_colocated(self.worker_group, rollout_resource_pool)
for server in self.rollout_replicas
]
)
else:
self._run_all([server.init_standalone() for server in self.rollout_replicas])
self.server_handles = [server._server_handle for server in self.rollout_replicas]
self.server_addresses = [server._server_address for server in self.rollout_replicas]
print(f"AgentLoopManager: {self.server_addresses}")
# Update Prometheus configuration with server addresses
if rollout_config.prometheus.enable:
if rollout_config.disable_log_stats:
raise ValueError("PROMETHEUS needs disable_log_stats==False, but it is currently True.")
update_prometheus_config(rollout_config.prometheus, self.server_addresses, rollout_config.name)
def _init_agent_loop_workers(self):
self.agent_loop_workers = []
num_workers = self.config.actor_rollout_ref.rollout.agent.num_workers
node_ids = [node["NodeID"] for node in ray.nodes() if node["Alive"] and node["Resources"].get("CPU", 0) > 0]
for i in range(num_workers):
# Round-robin scheduling over all alive nodes
node_id = node_ids[i % len(node_ids)]
self.agent_loop_workers.append(
self.agent_loop_workers_class.options(
name=f"agent_loop_worker_{i}" + f"_{uuid4().hex[:8]}",
scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
node_id=node_id, soft=True
),
).remote(self.config, self.server_handles, self.reward_loop_worker_handles)
)
def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Split input batch and dispatch to agent loop workers.
Args:
prompts (DataProto): Input batch.
Returns:
DataProto: Output batch.
"""
chunks = prompts.chunk(len(self.agent_loop_workers))
outputs = ray.get(
[
worker.generate_sequences.remote(chunk)
for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True)
]
)
output = DataProto.concat(outputs)
# calculate performance metrics
metrics = [output.meta_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]]
timing = self._performance_metrics(metrics, output)
output.meta_info = {"timing": timing, **outputs[0].meta_info}
return output
def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: DataProto) -> dict[str, float]:
timing = {}
t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk])
t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk])
num_preempted = np.array([metric["num_preempted"] for chunk in metrics for metric in chunk])
timing["agent_loop/num_preempted/min"] = num_preempted.min()
timing["agent_loop/num_preempted/max"] = num_preempted.max()
timing["agent_loop/num_preempted/mean"] = num_preempted.mean()
timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min()
timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max()
timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean()
timing["agent_loop/tool_calls/min"] = t_tool_calls.min()
timing["agent_loop/tool_calls/max"] = t_tool_calls.max()
timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean()
# batch sequence generation is bounded by the slowest sample
slowest = np.argmax(t_generate_sequences + t_tool_calls)
attention_mask = output.batch["attention_mask"][slowest]
prompt_length = output.batch["prompts"].shape[1]
timing["agent_loop/slowest/generate_sequences"] = t_generate_sequences[slowest]
timing["agent_loop/slowest/tool_calls"] = t_tool_calls[slowest]
timing["agent_loop/slowest/prompt_length"] = attention_mask[:prompt_length].sum().item()
timing["agent_loop/slowest/response_length"] = attention_mask[prompt_length:].sum().item()
timing["agent_loop/slowest/num_preempted"] = num_preempted[slowest]
return timing
def clear_kv_cache(self):
"""Clear all rollout kv cache, but don`t sleep."""
self._run_all([replica.clear_kv_cache() for replica in self.rollout_replicas])
def start_profile(self, **kwargs):
"""Start profiling on all rollout replicas."""
self._run_all([replica.start_profile(**kwargs) for replica in self.rollout_replicas])
def stop_profile(self):
"""Stop profiling on all rollout replicas."""
self._run_all([replica.stop_profile() for replica in self.rollout_replicas])
def _run_all(self, tasks: list[asyncio.Task]):
async def run_all():
await asyncio.gather(*tasks)
asyncio.run(run_all())
|
verl__experimental__agent_loop__agent_loop.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import ray
import yaml
from verl.workers.config.rollout import PrometheusConfig
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def update_prometheus_config(config: PrometheusConfig, server_addresses: list[str], rollout_name: str | None = None):
"""
Update the Prometheus configuration file with rollout server addresses and trigger a Prometheus reload
(only the node actually running Prometheus will succeed).

Args:
    server_addresses: vLLM or SGLang server addresses.
    rollout_name: name of the rollout backend (e.g., "vllm", "sglang").
"""
if not server_addresses:
logger.warning("No server addresses available to update Prometheus config")
return
try:
# Get Prometheus config file path from environment or use default
prometheus_config_json = {
"global": {"scrape_interval": "10s", "evaluation_interval": "10s"},
"scrape_configs": [
{
"job_name": "ray",
"file_sd_configs": [{"files": ["/tmp/ray/prom_metrics_service_discovery.json"]}],
},
{"job_name": "rollout", "static_configs": [{"targets": server_addresses}]},
],
}
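# For reference, yaml.dump (default_flow_style=False, keys sorted) renders the dict above
# roughly as the following prometheus.yml (the target address is illustrative):
#
#   global:
#     evaluation_interval: 10s
#     scrape_interval: 10s
#   scrape_configs:
#   - file_sd_configs:
#     - files:
#       - /tmp/ray/prom_metrics_service_discovery.json
#     job_name: ray
#   - job_name: rollout
#     static_configs:
#     - targets:
#       - 127.0.0.1:8000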
# Write configuration file to all nodes
@ray.remote(num_cpus=0)
def write_config_file(config_data, config_path):
os.makedirs(os.path.dirname(config_path), exist_ok=True)
with open(config_path, "w") as f:
yaml.dump(config_data, f, default_flow_style=False, indent=2)
return True
# Reload Prometheus on all nodes. Only master node should succeed, skip errors on other nodes.
@ray.remote(num_cpus=0)
def reload_prometheus(port):
import socket
import subprocess
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
reload_url = f"http://{ip_address}:{port}/-/reload"
try:
subprocess.run(["curl", "-X", "POST", reload_url], capture_output=True, text=True, timeout=10)
print(f"Reloading Prometheus on node: {reload_url}")
except Exception:
# Skip errors on non-master nodes
pass
# Get all available nodes and schedule tasks on each node
nodes = ray.nodes()
alive_nodes = [node for node in nodes if node["Alive"]]
# Write config files on all nodes
write_tasks = []
for node in alive_nodes:
node_ip = node["NodeManagerAddress"]
task = write_config_file.options(
resources={"node:" + node_ip: 0.001} # Schedule to specific node
).remote(prometheus_config_json, config.file)
write_tasks.append(task)
ray.get(write_tasks)
server_type = rollout_name.upper() if rollout_name else "rollout"
print(f"Updated Prometheus configuration at {config.file} with {len(server_addresses)} {server_type} servers")
# Reload Prometheus on all nodes
reload_tasks = []
for node in alive_nodes:
node_ip = node["NodeManagerAddress"]
task = reload_prometheus.options(
resources={"node:" + node_ip: 0.001} # Schedule to specific node
).remote(config.port)
reload_tasks.append(task)
ray.get(reload_tasks)
except Exception as e:
logger.error(f"Failed to update Prometheus configuration: {e}")
|
verl__experimental__agent_loop__prometheus_utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any
from uuid import uuid4
from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@register("single_turn_agent")
class SingleTurnAgentLoop(AgentLoopBase):
"""Naive agent loop that only do single turn chat completion."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.prompt_length = self.config.actor_rollout_ref.rollout.prompt_length
self.response_length = self.config.actor_rollout_ref.rollout.response_length
tool_config_path = self.config.data.tool_config_path
tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
self.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
messages = list(kwargs["raw_prompt"])
# 1. extract images and videos from messages
multi_modal_data = await self.process_vision_info(messages)
images = multi_modal_data.get("images")
videos = multi_modal_data.get("videos")
# 2. apply chat template and tokenize
prompt_ids = await self.apply_chat_template(
messages,
tools=self.tool_schemas,
images=images,
videos=videos,
)
# 3. generate sequences
metrics = {}
with simple_timer("generate_sequences", metrics):
output = await self.server_manager.generate(
request_id=uuid4().hex,
prompt_ids=prompt_ids,
sampling_params=sampling_params,
image_data=images,
video_data=videos,
)
if metrics.get("num_preempted") is None:
metrics["num_preempted"] = output.num_preempted if output.num_preempted is not None else -1
response_mask = [1] * len(output.token_ids)
output = AgentLoopOutput(
prompt_ids=prompt_ids,
response_ids=output.token_ids[: self.response_length],
response_mask=response_mask[: self.response_length],
response_logprobs=output.log_probs[: self.response_length] if output.log_probs else None,
routed_experts=(
output.routed_experts[: len(prompt_ids) + self.response_length]
if output.routed_experts is not None
else None
),
multi_modal_data=multi_modal_data,
num_turns=2,
metrics=metrics,
)
# keeping the schema consistent with tool_agent_loop
output.extra_fields.update({"turn_scores": [], "tool_rewards": []})
return output
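# Hedged sketch (illustrative, not part of the original module): a custom agent loop can be
# registered the same way and selected per sample via non_tensor_batch["agent_name"].
# The argument lists below are simplified; optional parameters are omitted.
#
#   @register("my_single_turn_agent")
#   class MySingleTurnAgentLoop(AgentLoopBase):
#       async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
#           prompt_ids = await self.apply_chat_template(list(kwargs["raw_prompt"]))
#           result = await self.server_manager.generate(
#               request_id=uuid4().hex, prompt_ids=prompt_ids, sampling_params=sampling_params
#           )
#           return AgentLoopOutput(
#               prompt_ids=prompt_ids,
#               response_ids=result.token_ids,
#               response_mask=[1] * len(result.token_ids),
#               num_turns=2,
#               metrics={},
#           )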
|
verl__experimental__agent_loop__single_turn_agent_loop.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
import os
from enum import Enum
from typing import Any, Optional
from uuid import uuid4
import torch
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer
from verl.experimental.agent_loop.agent_loop import (
AgentLoopBase,
AgentLoopOutput,
AsyncLLMServerManager,
DictConfigWrap,
register,
)
from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser
from verl.experimental.agent_loop.utils import build_gpt_oss_tool_response_text
from verl.interactions.base import BaseInteraction
from verl.interactions.utils.interaction_registry import initialize_interactions_from_config
from verl.tools.schemas import ToolResponse
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
from verl.utils.rollout_trace import rollout_trace_op
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class AgentState(Enum):
PENDING = "pending"
GENERATING = "generating"
PROCESSING_TOOLS = "processing_tools"
TERMINATED = "terminated"
INTERACTING = "interacting"
class AgentData:
"""Encapsulates all state variables for the agent loop. AgentData is passed to tool calling in case that
tool may need to access full history state. User can store any tool session data in `extra_fields`."""
def __init__(
self,
messages: list[dict[str, Any]],
image_data: list[Image.Image],
video_data: list[tuple[torch.Tensor, dict[str, Any]]],
metrics: dict[str, Any],
request_id: str,
tools_kwargs: dict[str, Any],
interaction: Optional[BaseInteraction] = None,
interaction_kwargs: Optional[dict[str, Any]] = None,
):
self.messages = messages
self.image_data = image_data
self.video_data = video_data
self.metrics = metrics
self.request_id = request_id
self.tools_kwargs = tools_kwargs
self.interaction = interaction
self.interaction_kwargs = interaction_kwargs or {}
# State variables
self.prompt_ids: list[int] = []
self.response_ids: list[int] = []
self.response_mask: list[int] = []
self.response_logprobs: list[float] = []
self.turn_scores: list[float] = []
self.tool_rewards: list[float] = []
self.user_turns = 0
self.assistant_turns = 0
# Temporary state for tool calls
self.tool_calls: list[FunctionCall] = []
# Extra fields for dynamic addition, e.g., tool session data
self.extra_fields: dict[str, Any] = {}
@register("tool_agent")
class ToolAgentLoop(AgentLoopBase):
def __init__(
self,
trainer_config: DictConfigWrap,
server_manager: AsyncLLMServerManager,
tokenizer: AutoTokenizer,
processor: AutoProcessor,
**kwargs,
):
super().__init__(trainer_config, server_manager, tokenizer, processor, **kwargs)
config = trainer_config.config
# Initialize tools from config file
self.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns
self.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns
self.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls
self.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length
self.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side
tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path
tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
self.tools = {tool.name: tool for tool in tool_list}
self.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
self.tool_parser = ToolParser.get_tool_parser(
config.actor_rollout_ref.rollout.multi_turn.format, self.tokenizer
)
self.tool_parser_name = config.actor_rollout_ref.rollout.multi_turn.format
self.prompt_length = config.actor_rollout_ref.rollout.prompt_length
self.response_length = config.actor_rollout_ref.rollout.response_length
# Initialize interactions from config file
self.interaction_config_file = config.actor_rollout_ref.rollout.multi_turn.interaction_config_path
if self.interaction_config_file:
self.interaction_map: dict[str, BaseInteraction] = self._initialize_interactions(
self.interaction_config_file
)
@rollout_trace_op
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
messages = list(kwargs["raw_prompt"])
# extract images and videos from messages
multi_modal_data = await self.process_vision_info(messages)
images = multi_modal_data.get("images")
videos = multi_modal_data.get("videos")
metrics = {}
request_id = uuid4().hex
tools_kwargs = kwargs.get("tools_kwargs", {})
# Initialize interaction if needed
interaction = None
interaction_kwargs = {}
if self.interaction_config_file:
interaction_kwargs = kwargs["extra_info"]["interaction_kwargs"]
if "name" not in interaction_kwargs:
raise ValueError("'name' key is required in interaction_kwargs")
interaction_name = interaction_kwargs["name"]
if interaction_name not in self.interaction_map:
raise ValueError(
f"Interaction '{interaction_name}' not found in interaction_map. Available interactions: "
f"{list(self.interaction_map.keys())}"
)
interaction = self.interaction_map[interaction_name]
await interaction.start_interaction(request_id, **interaction_kwargs)
# Create AgentData instance to encapsulate all state
agent_data = AgentData(
messages=messages,
image_data=images,
video_data=videos,
metrics=metrics,
request_id=request_id,
tools_kwargs=tools_kwargs,
interaction=interaction,
interaction_kwargs=interaction_kwargs,
)
# State machine loop
state = AgentState.PENDING
while state != AgentState.TERMINATED:
if state == AgentState.PENDING:
state = await self._handle_pending_state(agent_data, sampling_params)
elif state == AgentState.GENERATING:
state = await self._handle_generating_state(agent_data, sampling_params)
elif state == AgentState.PROCESSING_TOOLS:
state = await self._handle_processing_tools_state(agent_data)
elif state == AgentState.INTERACTING:
state = await self._handle_interacting_state(agent_data)
else:
logger.error(f"Invalid state: {state}")
state = AgentState.TERMINATED
# Finalize output
response_ids = agent_data.prompt_ids[-len(agent_data.response_mask) :]
prompt_ids = agent_data.prompt_ids[: len(agent_data.prompt_ids) - len(agent_data.response_mask)]
multi_modal_data = {}
if agent_data.image_data is not None:
multi_modal_data["images"] = agent_data.image_data
if agent_data.video_data is not None:
multi_modal_data["videos"] = agent_data.video_data
output = AgentLoopOutput(
prompt_ids=prompt_ids,
response_ids=response_ids[: self.response_length],
response_mask=agent_data.response_mask[: self.response_length],
multi_modal_data=multi_modal_data,
response_logprobs=agent_data.response_logprobs[: self.response_length]
if agent_data.response_logprobs
else None,
num_turns=agent_data.user_turns + agent_data.assistant_turns + 1,
metrics=agent_data.metrics,
extra_fields={},
)
output.extra_fields.update({"turn_scores": agent_data.turn_scores, "tool_rewards": agent_data.tool_rewards})
return output
async def _handle_pending_state(self, agent_data: AgentData, sampling_params: dict[str, Any]) -> AgentState:
"""Handle the pending state: prepare the prompt and start generation."""
prompt_ids = await self.apply_chat_template(
agent_data.messages,
tools=self.tool_schemas,
images=agent_data.image_data,
videos=agent_data.video_data,
)
agent_data.prompt_ids = prompt_ids
return AgentState.GENERATING
async def _handle_generating_state(
self, agent_data: AgentData, sampling_params: dict[str, Any], ignore_termination: bool = False
) -> AgentState:
"""Handle the generating state: generate model response and check for tool calls."""
add_messages: list[dict[str, Any]] = []
with simple_timer("generate_sequences", agent_data.metrics):
output = await self.server_manager.generate(
request_id=agent_data.request_id,
prompt_ids=agent_data.prompt_ids,
sampling_params=sampling_params,
image_data=agent_data.image_data,
video_data=agent_data.video_data,
)
# first generation in this agent loop: initialize num_preempted
if agent_data.metrics.get("num_preempted") is None:
agent_data.metrics["num_preempted"] = output.num_preempted if output.num_preempted is not None else -1
# subsequent generations: accumulate num_preempted
else:
agent_data.metrics["num_preempted"] += output.num_preempted if output.num_preempted is not None else 0
agent_data.assistant_turns += 1
agent_data.response_ids = output.token_ids
agent_data.prompt_ids += agent_data.response_ids
agent_data.response_mask += [1] * len(agent_data.response_ids)
if output.log_probs:
agent_data.response_logprobs += output.log_probs
if output.routed_experts is not None:
agent_data.routed_experts = output.routed_experts
# Check termination conditions
if not ignore_termination and len(agent_data.response_mask) >= self.response_length:
return AgentState.TERMINATED
if self.max_assistant_turns and agent_data.assistant_turns >= self.max_assistant_turns:
return AgentState.TERMINATED
if self.max_user_turns and agent_data.user_turns >= self.max_user_turns:
return AgentState.TERMINATED
# Extract tool calls
_, agent_data.tool_calls = await self.tool_parser.extract_tool_calls(agent_data.response_ids)
# Handle interaction if needed
if self.interaction_config_file:
assistant_message = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(agent_data.response_ids, skip_special_tokens=True)
)
add_messages.append({"role": "assistant", "content": assistant_message})
agent_data.messages.extend(add_messages)
# Determine next state
if agent_data.tool_calls:
return AgentState.PROCESSING_TOOLS
elif self.interaction_config_file:
return AgentState.INTERACTING
else:
return AgentState.TERMINATED
async def _handle_processing_tools_state(self, agent_data: AgentData) -> AgentState:
"""Handle the processing tools state: execute tool calls and prepare tool responses."""
add_messages: list[dict[str, Any]] = []
new_images_this_turn: list[Any] = [] # Local variable instead of agent_data attribute
tasks = []
tool_call_names = []
for tool_call in agent_data.tool_calls[: self.max_parallel_calls]:
tasks.append(self._call_tool(tool_call, agent_data.tools_kwargs, agent_data))
tool_call_names.append(tool_call.name)
with simple_timer("tool_calls", agent_data.metrics):
responses = await asyncio.gather(*tasks)
# Process tool responses and update multi_modal_data
for tool_response, tool_reward, _ in responses:
# Create message from tool response
if tool_response.image or tool_response.video:
# Multi-modal content with structured format
if not getattr(self.processor, "image_processor", None):
raise ValueError(
"Multimedia data can only be processed by `processor`, but the processor is None. "
"This error is often caused if you are using a LLM model but your tool returns multimodal "
"data. Plase use a vlm as the base model."
)
content = []
if tool_response.image:
content.append({"type": "image"})
if tool_response.video:
content.append({"type": "video"})
if tool_response.text:
content.append({"type": "text", "text": tool_response.text})
message = {"role": "tool", "content": content}
else:
# Text-only content
message = {"role": "tool", "content": tool_response.text or ""}
add_messages.append(message)
# Handle image data
if tool_response.image:
# Add new image data
if isinstance(tool_response.image, list):
# Ensure all elements in the list are valid image objects
for img in tool_response.image:
if img is not None: # Add a check to ensure the image is not None
new_images_this_turn.append(img) # Using local variable
else:
# Ensure the image is not None
if tool_response.image is not None:
new_images_this_turn.append(tool_response.image) # Using local variable
# Handle video data
if tool_response.video:
# Currently not supported, raise informative error
logger.warning("Multimedia type 'video' is not currently supported. Only 'image' is supported.")
raise NotImplementedError(
"Multimedia type 'video' is not currently supported. Only 'image' is supported."
)
if tool_reward is not None:
agent_data.tool_rewards.append(tool_reward)
agent_data.messages.extend(add_messages)
if self.tool_parser_name == "gpt-oss":
logger.info("manually format tool responses for gpt-oss")
tool_response_text = build_gpt_oss_tool_response_text(add_messages, tool_call_names)
response_ids = await self.loop.run_in_executor(
None, lambda: self.tokenizer.encode(tool_response_text, add_special_tokens=False)
)
else:
response_ids = await self.apply_chat_template(
add_messages,
images=new_images_this_turn, # Using local variable
videos=None,
remove_system_prompt=True,
)
if len(agent_data.response_mask) + len(response_ids) >= self.response_length:
return AgentState.TERMINATED
# Update prompt_ids and response_mask
if new_images_this_turn:
if agent_data.image_data is None:
agent_data.image_data = []
elif not isinstance(agent_data.image_data, list):
agent_data.image_data = [agent_data.image_data]
for img in new_images_this_turn:
agent_data.image_data.append(img)
agent_data.prompt_ids += response_ids
agent_data.response_mask += [0] * len(response_ids)
if agent_data.response_logprobs:
agent_data.response_logprobs += [0.0] * len(response_ids)
agent_data.user_turns += 1
return AgentState.GENERATING
async def _handle_interacting_state(self, agent_data: AgentData) -> AgentState:
"""Handle the interacting state: get user input from interaction."""
(
should_terminate_sequence,
interaction_responses,
reward,
metrics,
) = await agent_data.interaction.generate_response(
agent_data.request_id, agent_data.messages, **agent_data.interaction_kwargs
)
agent_data.user_turns += 1
add_messages: list[dict[str, Any]] = [{"role": "user", "content": interaction_responses}]
agent_data.messages.extend(add_messages)
if reward is not None:
agent_data.turn_scores.append(reward)
# Update prompt with user responses (similar to _handle_processing_tools_state)
response_ids = await self.apply_chat_template(
add_messages,
remove_system_prompt=True,
)
# Update prompt_ids and response_mask
agent_data.prompt_ids += response_ids
agent_data.response_mask += [0] * len(response_ids)
if agent_data.response_logprobs:
agent_data.response_logprobs += [0.0] * len(response_ids)
# double check prompt
# Check termination condition
if should_terminate_sequence:
return AgentState.TERMINATED
else:
return AgentState.GENERATING
async def _call_tool(
self, tool_call: FunctionCall, tools_kwargs: dict[str, Any], agent_data: AgentData
) -> tuple[ToolResponse, float, dict]:
"""Call tool and return tool response."""
tool, instance_id = None, None
try:
# TODO: append malformed tool_call to the prompt: invalid function name or arguments
tool_name = tool_call.name
tool_args = json.loads(tool_call.arguments)
tool = self.tools[tool_name]
kwargs = tools_kwargs.get(tool_name, {})
instance_id, _ = await tool.create(create_kwargs=kwargs.get("create_kwargs", {}))
tool_execution_response, tool_reward, res = await tool.execute(
instance_id, tool_args, agent_data=agent_data
)
except Exception as e:
logger.warning(f"Error when executing tool: {e}")
return (
ToolResponse(
text=f"Error when executing tool: {e}",
),
0.0,
{},
)
finally:
if tool and instance_id:
await tool.release(instance_id)
tool_response_text = tool_execution_response.text
if tool_response_text and len(tool_response_text) > self.max_tool_response_length:
if self.tool_response_truncate_side == "left":
tool_response_text = tool_response_text[: self.max_tool_response_length] + "...(truncated)"
elif self.tool_response_truncate_side == "right":
tool_response_text = "(truncated)..." + tool_response_text[-self.max_tool_response_length :]
else:
length = self.max_tool_response_length // 2
tool_response_text = tool_response_text[:length] + "...(truncated)..." + tool_response_text[-length:]
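# Illustrative truncation behavior (assumed max_tool_response_length=5, text="abcdefghij"):
#   left   -> "abcde...(truncated)"   (keep the leftmost 5 characters)
#   right  -> "(truncated)...fghij"   (keep the rightmost 5 characters)
#   middle -> "ab...(truncated)...ij" (keep 5 // 2 characters from each side)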
# Create ToolResponse from tool execution result
tool_response_kwargs = {"text": tool_response_text}
# Add multimedia data if present
for attr_name in ["image", "video"]:
if hasattr(tool_execution_response, attr_name):
attr_value = getattr(tool_execution_response, attr_name)
if attr_value is not None:
tool_response_kwargs[attr_name] = attr_value
return ToolResponse(**tool_response_kwargs), tool_reward, res
def _initialize_interactions(self, interaction_config_file):
"""Initialize interactions from configuration.
Returns:
dict[str, BaseInteraction]: A dictionary mapping interaction names to interaction instances.
"""
if interaction_config_file is None:
return {}
interaction_map = initialize_interactions_from_config(interaction_config_file)
return interaction_map
|
verl__experimental__agent_loop__tool_agent_loop.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from abc import ABC, abstractmethod
import regex
from pydantic import BaseModel
from verl.utils.ray_utils import get_event_loop
from verl.utils.rollout_trace import rollout_trace_op
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class FunctionCall(BaseModel):
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
class ToolParser(ABC):
_registry: dict[str, type["ToolParser"]] = {}
def __init__(self, tokenizer) -> None:
self.tokenizer = tokenizer
@abstractmethod
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
"""Extract tool calls from the responses.
Args:
responses_ids (List[int]): The ids of the responses.
Returns:
Tuple[str, List[FunctionCall]]: Content and extracted tool calls.
"""
raise NotImplementedError
@classmethod
def get_tool_parser(cls, name: str, tokenizer):
if name not in cls._registry:
raise ValueError(f"Unknown tool parser: {name}")
return cls._registry[name](tokenizer)
@classmethod
def register(cls, name: str):
def decorator(subclass: type[ToolParser]) -> type[ToolParser]:
cls._registry[name] = subclass
return subclass
return decorator
@ToolParser.register("hermes")
class HermesToolParser(ToolParser):
"""Adapted from https://github.com/vllm-project/vllm/blob/v0.9.1/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py"""
def __init__(self, tokenizer) -> None:
super().__init__(tokenizer)
self.tool_call_start_token: str = "<tool_call>"
self.tool_call_end_token: str = "</tool_call>"
self.tool_call_regex = regex.compile(r"<tool_call>(.*?)</tool_call>", regex.DOTALL)
@rollout_trace_op
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
loop = get_event_loop()
text = await loop.run_in_executor(None, self.tokenizer.decode, responses_ids)
if self.tool_call_start_token not in text or self.tool_call_end_token not in text:
return text, []
matches = self.tool_call_regex.findall(text)
function_calls = []
for match in matches:
try:
function_call = json.loads(match)
name, arguments = function_call["name"], function_call["arguments"]
function_calls.append(FunctionCall(name=name, arguments=json.dumps(arguments, ensure_ascii=False)))
except Exception as e:
logger.error(f"Failed to decode tool call: {e}")
# remaining text, excluding tool call tokens
content = self.tool_call_regex.sub("", text)
return content, function_calls
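# Illustrative example (assumed decoded text):
#   'Let me check.<tool_call>{"name": "get_weather", "arguments": {"city": "Paris"}}</tool_call>'
# yields content 'Let me check.' and [FunctionCall(name="get_weather", arguments='{"city": "Paris"}')].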
@ToolParser.register("gpt-oss")
class GptOssToolParser(ToolParser):
"""
Tool parser for gpt-oss model.
Adapted from https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/function_call/gpt_oss_detector.py
Args:
tokenizer: The tokenizer to use.
"""
def __init__(self, tokenizer) -> None:
super().__init__(tokenizer)
# check https://cookbook.openai.com/articles/openai-harmony for more details.
self.cot_pattern = regex.compile(
r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>.*?<\|end\|>", regex.DOTALL
)
# <|start|>assistant may be prepended in prompts, so we need to remove it.
self.partial_cot_pattern = regex.compile(r"<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>", regex.DOTALL)
self.tool_call_pattern = regex.compile(
r"<\|start\|>assistant<\|channel\|>[^<]* to=functions\.([^<]+) "
r"<\|constrain\|>json<\|message\|>(.*?)<\|call\|>",
regex.DOTALL,
)
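# Illustrative span matched by tool_call_pattern (assumed formatting):
#   <|start|>assistant<|channel|>commentary to=functions.get_weather <|constrain|>json<|message|>{"city": "Paris"}<|call|>
# yields FunctionCall(name="get_weather", arguments='{"city": "Paris"}').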
@rollout_trace_op
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
loop = get_event_loop()
# We need to keep special tokens for gpt-oss model for better tool call extraction.
text = await loop.run_in_executor(None, lambda: self.tokenizer.decode(responses_ids, skip_special_tokens=False))
# Need to remove padding tokens for better tool call extraction.
text = text.replace(self.tokenizer.pad_token, "")
# Need to remove the CoT since it may contain tool call tokens, but they are not valid tool calls.
text = regex.sub(self.cot_pattern, "", text)
text = regex.sub(self.partial_cot_pattern, "", text)
# check if there are tool calls in the text via regex.findall
matches = regex.findall(self.tool_call_pattern, text)
if not matches:
return text, []
function_calls = []
for match in matches:
try:
name, arguments = match[0], match[1]
# don't check if arguments is valid JSON and leave it to client
function_calls.append(FunctionCall(name=name, arguments=arguments))
except Exception as e:
logger.error(f"Failed to decode tool call: {e}")
# remaining text, excluding tool call tokens
content = regex.sub(self.tool_call_pattern, "", text)
return content, function_calls
|
verl__experimental__agent_loop__tool_parser.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any
def resolve_config_path(config_path: str) -> str:
"""Resolve agent loop configuration file path.
In multi-node Ray training, relative paths may not resolve correctly
because the working directory on remote nodes can differ from the driver node.
This function resolves relative paths by checking multiple locations in order:
1. If already absolute, return as-is
2. Try current working directory
3. Try relative to verl package installation (project root)
Args:
config_path: Configuration file path (relative or absolute)
Returns:
Absolute path to the configuration file
Raises:
FileNotFoundError: If the configuration file cannot be found
"""
# Return absolute paths unchanged
if os.path.isabs(config_path):
return config_path
# Try current working directory first
cwd = os.path.abspath(os.getcwd())
cwd_path = os.path.abspath(os.path.join(cwd, config_path))
if (cwd_path == cwd or cwd_path.startswith(cwd + os.sep)) and os.path.exists(cwd_path):
return cwd_path
# Try relative to verl project root (where verl package is installed)
try:
import verl
verl_package_dir = os.path.abspath(os.path.dirname(verl.__file__))
# Strategy 1: For development/editable installs.
project_root = os.path.dirname(verl_package_dir)
dev_path = os.path.abspath(os.path.join(project_root, config_path))
if (dev_path == project_root or dev_path.startswith(project_root + os.sep)) and os.path.exists(dev_path):
return dev_path
# Strategy 2: For standard package installations.
install_path = os.path.abspath(os.path.join(verl_package_dir, config_path))
if (install_path == verl_package_dir or install_path.startswith(verl_package_dir + os.sep)) and os.path.exists(
install_path
):
return install_path
except (ImportError, AttributeError):
pass # verl not installed or __file__ not available
# File not found - raise clear error
raise FileNotFoundError(
f"Agent loop configuration file not found: {config_path}. Tried current directory and verl project root."
)
# tokenizer.apply_chat_template does not work properly for the gpt-oss model:
# the chat template requires the preceding tool call messages to render tool response messages,
# so we need to format the tool responses manually.
def format_gpt_oss_tool_response_manually(tool_response: str, tool_call_name: str) -> str:
"""Format tool response for gpt-oss model.
Args:
tool_response: Tool response string
tool_call_name: Name of the tool that was called
Returns:
Formatted tool response string
"""
return f"<|start|>functions.{tool_call_name} to=assistant<|channel|>commentary<|message|>{tool_response}<|end|>"
def add_generation_prompt_for_gpt_oss(message_content: str) -> str:
"""Add generation prompt for gpt-oss model.
Args:
message_content: Message content string
Returns:
Message content string with generation prompt
"""
return message_content + "<|start|>assistant"
def build_gpt_oss_tool_response_text(messages: list[dict[str, Any]], tool_call_names: list[str]) -> str:
"""Build gpt-oss tool response text (manual formatting + generation prompt)."""
tool_response_texts: list[str] = []
for i, tool_msg in enumerate(messages):
actual_tool_name = tool_call_names[i]
formatted = format_gpt_oss_tool_response_manually(tool_msg["content"], actual_tool_name)
tool_response_texts.append(formatted)
return add_generation_prompt_for_gpt_oss("".join(tool_response_texts))
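# Illustrative example (assumed inputs):
#   build_gpt_oss_tool_response_text([{"role": "tool", "content": "sunny"}], ["get_weather"])
# returns
#   '<|start|>functions.get_weather to=assistant<|channel|>commentary<|message|>sunny<|end|><|start|>assistant'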
|
verl__experimental__agent_loop__utils.py
|
# Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from collections.abc import Sized
from omegaconf import DictConfig
from torch.utils.data import Sampler
from verl import DataProto
class AbstractSampler(Sampler[int]):
"""Abstract interface for custom samplers."""
@abstractmethod
def __init__(
self,
data_source: Sized,
data_config: DictConfig,
):
pass
class AbstractCurriculumSampler(AbstractSampler):
"""Experimental interface for curriculum learning samplers."""
@abstractmethod
def update(self, batch: DataProto) -> None:
pass
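# Hedged sketch of a custom curriculum sampler implementing this interface
# (illustrative only; class and attribute names are assumptions):
#
#   class ShuffleCurriculumSampler(AbstractCurriculumSampler):
#       def __init__(self, data_source: Sized, data_config: DictConfig):
#           self.size = len(data_source)
#
#       def __iter__(self):
#           import random
#           return iter(random.sample(range(self.size), self.size))
#
#       def __len__(self):
#           return self.size
#
#       def update(self, batch: DataProto) -> None:
#           pass  # e.g., re-weight or re-order samples based on batch rewards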
|
verl__experimental__dataset__sampler.py
|
# Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dataset class that enables dynamic data generation strategies between iterations of training.
This class extends RLHFDataset and uses an AbstractDataGen instance to generate data.
This is especially useful in settings where a proposer model generates new tasks based
on rollout data.
"""
import logging
from abc import ABC, abstractmethod
from typing import Optional
import datasets
from omegaconf import DictConfig
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl import DataProto
from verl.utils.dataset import RLHFDataset
from verl.utils.import_utils import load_extern_object
logger = logging.getLogger(__name__)
class AbstractDataGenerator(ABC):
def __init__(self, config: DictConfig):
self.config = config
@abstractmethod
def generate(self, dataset: Dataset) -> datasets.Dataset:
"""
Generate method must be implemented by subclasses.
Args:
dataset: The dataset to generate from.
Returns:
Processed data or result as implemented by the subclass.
"""
pass
class MockDataGenerator(AbstractDataGenerator):
"""
A no-op data generation class that only re-appends the first datapoint.
This class is useful as a placeholder and for testing.
"""
def __init__(self, config: DictConfig = None):
super().__init__(config)
def generate(self, dataset: Dataset) -> datasets.Dataset:
print("MockDataGenerator: No operation performed on the dataset.")
return dataset.dataframe.select([0])
class DynamicGenDataset(RLHFDataset):
"""
A dataset class that uses a data generation strategy to process data.
This class extends RLHFDataset and uses an AbstractDataGen instance to generate data.
"""
def __init__(
self,
data_files: str | list[str],
tokenizer: PreTrainedTokenizer,
config: DictConfig,
processor: Optional[ProcessorMixin] = None,
):
super().__init__(data_files, tokenizer, config, processor)
self.datagen = config.datagen
assert "datagen" in config and config.datagen.get("path", None) is not None, (
f"datagen path is not set in config: {config}"
)
# Dynamically load the custom datagen class
datagen_cls = load_extern_object(config.datagen.path, config.datagen.name)
# Verify that the custom datagen class inherits from AbstractDataGenerator
abs_cls = AbstractDataGenerator
if not issubclass(datagen_cls, abs_cls):
raise TypeError(
f"The custom datagen class '{config.datagen.name}' from '{config.datagen.path}'"
+ " must inherit from {abs_cls}"
)
self.data_generator = datagen_cls(config.datagen)
self.on_batch_end()
def append_dataframe(self, new_dataframe: datasets.Dataset):
new_dataframe = self.maybe_filter_out_long_prompts(new_dataframe)
self.dataframe = datasets.concatenate_datasets([self.dataframe, new_dataframe])
logger.info(f"new dataset len: {len(self.dataframe)}")
def on_batch_end(self, batch: Optional[DataProto] = None) -> None:
"""
Generate data using the provided data generation strategy.
Note: This method is intended to change the dataset after each training batch.
"""
new_data = self.data_generator.generate(self)
self.append_dataframe(new_data)
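# Hedged sketch of a user-provided generator, loaded via config.datagen.path and
# config.datagen.name (illustrative only; the class name is an assumption):
#
#   class KeepFirstKDataGenerator(AbstractDataGenerator):
#       def generate(self, dataset: Dataset) -> datasets.Dataset:
#           # e.g., re-append the first k rows of the current dataframe
#           k = min(8, len(dataset.dataframe))
#           return dataset.dataframe.select(range(k))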
|
verl__experimental__dynamic_dataset__dynamicgen_dataset.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import Any, Optional, Sequence
import hydra
import numpy as np
import ray
from omegaconf import DictConfig
from verl.experimental.agent_loop.agent_loop import (
AgentLoopManager,
AgentLoopOutput,
AgentLoopWorker,
AsyncLLMServerManager,
DictConfigWrap,
_agent_loop_registry,
get_trajectory_info,
)
from verl.experimental.agent_loop.prometheus_utils import update_prometheus_config
from verl.protocol import DataProto
from verl.single_controller.ray import RayWorkerGroup
from verl.utils.rollout_trace import (
rollout_trace_attr,
rollout_trace_op,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class FullyAsyncLLMServerManager(AsyncLLMServerManager):
@rollout_trace_op
async def generate_for_partial(
self,
request_id,
*,
prompt_ids: list[int],
sampling_params: dict[str, Any],
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
) -> tuple[list[Any], list[Any], Any] | tuple[Sequence[int], list[float], bool]:
"""Generate tokens from prompt ids, used for async partial.
Args:
request_id (str): request id for sticky session.
prompt_ids (List[int]): List of prompt token ids.
sampling_params (Dict[str, Any]): Sampling parameters for the chat completion.
Returns:
output: A tuple representing the generation output.
- Element 0 (Sequence[int]): Generated response token IDs.
- Element 1 (list[float]): Log probabilities for the response token IDs.
- Element 2 (bool): A flag or status indicating cancellation.
"""
server = self._choose_server(request_id)
output = await server.generate_for_partial.remote(
request_id=request_id,
prompt_ids=prompt_ids,
sampling_params=sampling_params,
image_data=image_data,
video_data=video_data,
)
return output
@ray.remote
class FullyAsyncAgentLoopWorker(AgentLoopWorker):
def __init__(
self,
config: DictConfig,
server_handles: list[ray.actor.ActorHandle],
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
self.server_manager = FullyAsyncLLMServerManager(config, server_handles)
super().__init__(config, server_handles, reward_loop_worker_handles)
# A shared cancellation event for all agent loops running on this worker.
self.cancellation_event = asyncio.Event()
async def generate_sequences_no_post(
self, batch: DataProto, partial_output_list: Optional[list[AgentLoopOutput]]
) -> tuple[list[AgentLoopOutput], bool] | tuple[DataProto, bool]:
"""Generate sequences from agent loop.
Args:
batch (DataProto): Input batch.
partial_output_list: Optional[List[AgentLoopOutput]]: already rollout result.
Returns:
list[AgentLoopOutput]: List of agent loop outputs, one per sample in the batch.
"""
config = self.config.actor_rollout_ref.rollout
sampling_params = dict(
temperature=config.temperature,
top_p=config.top_p,
repetition_penalty=1.0,
logprobs=config.calculate_log_probs,
)
# override sampling params for validation
if batch.meta_info.get("validate", False):
sampling_params["top_p"] = config.val_kwargs.top_p
sampling_params["temperature"] = config.val_kwargs.temperature
if "agent_name" not in batch.non_tensor_batch:
default_agent_loop = config.agent.default_agent_loop
batch.non_tensor_batch["agent_name"] = np.array([default_agent_loop] * len(batch), dtype=object)
if "index" in batch.non_tensor_batch:
index = batch.non_tensor_batch["index"]
else:
index = np.arange(len(batch))
trajectory_info = await get_trajectory_info(
batch.meta_info.get("global_steps", -1), index, batch.meta_info.get("validate", False)
)
if not partial_output_list:
partial_output_list = [None] * len(batch)
try:
tasks = []
for i in range(len(batch)):
kwargs = {k: v[i] for k, v in batch.non_tensor_batch.items()}
kwargs["output"] = partial_output_list[i]
tasks.append(
asyncio.create_task(self._partial_run_agent_loop(sampling_params, trajectory_info[i], **kwargs))
)
outputs = await asyncio.gather(*tasks)
except Exception:
logger.exception("_partial_run_agent_loop failed")
raise
is_cancel = any(output.extra_fields.get("is_cancel", False) for output in outputs)
if not is_cancel:
output = self._postprocess(outputs)
output = self._addition_process(output)
return output, is_cancel
return outputs, is_cancel
def _addition_process(self, output: DataProto):
"""collect metirics"""
metrics = output.meta_info.pop("metrics") # List[Dict[str, str]]
processing_times_list = [item["generate_sequences"] for item in metrics]
tool_calls_times_list = [item["tool_calls"] for item in metrics]
output.non_tensor_batch["processing_times"] = processing_times_list
output.non_tensor_batch["tool_calls_times"] = tool_calls_times_list
return output
async def _partial_run_agent_loop(
self,
sampling_params: dict[str, Any],
trajectory: dict[str, Any],
*,
agent_name: str,
**kwargs,
) -> AgentLoopOutput:
# Completed, return directly
if kwargs["output"] is not None and not kwargs["output"].extra_fields.get("is_cancel", False):
logger.info("In _partial_run_agent_loop, already completed, return derictly!")
return kwargs["output"]
try:
with rollout_trace_attr(
step=trajectory["step"],
sample_index=trajectory["sample_index"],
rollout_n=trajectory["rollout_n"],
validate=trajectory["validate"],
name="agent_loop",
):
assert agent_name in _agent_loop_registry, (
f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}"
)
agent_loop_config = _agent_loop_registry[agent_name]
agent_loop = hydra.utils.instantiate(
config=agent_loop_config,
trainer_config=DictConfigWrap(config=self.config),
server_manager=self.server_manager,
tokenizer=self.tokenizer,
processor=self.processor,
dataset_cls=self.dataset_cls,
dataset_config=DictConfigWrap(config=self.config.data),
)
output: AgentLoopOutput = await agent_loop.run(
sampling_params, cancellation_event=self.cancellation_event, **kwargs
)
if not output.extra_fields.get("is_cancel", False):
kwargs.pop("output", None)
output = await self._agent_loop_postprocess(output, **kwargs)
return output
except Exception:
logger.exception("Agent_loop run failed")
raise
async def cancel_agent_loops(self):
"""Set the shared cancellation event to stop all agent loops."""
self.cancellation_event.set()
async def resume_agent_loops(self):
"""Clear the shared cancellation event."""
self.cancellation_event.clear()
class FullyAsyncAgentLoopManager(AgentLoopManager):
def __init__(
self,
config: DictConfig,
worker_group: RayWorkerGroup = None,
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
self.config = config
self.worker_group = worker_group
self.reward_loop_worker_handles = reward_loop_worker_handles
self.agent_loop_workers_class = FullyAsyncAgentLoopWorker
# Select rollout replica class based on rollout name
rollout_name = config.actor_rollout_ref.rollout.name
if rollout_name == "sglang":
from verl.experimental.fully_async_policy.sglang_rollout.sglang_async_server import FullyAsyncSGLangReplica
self.rollout_replica_class = FullyAsyncSGLangReplica
print("[FullyAsyncAgentLoopManager] SGLang replica class selected")
elif rollout_name == "vllm":
from verl.experimental.fully_async_policy.vllm_rollout.vllm_async_server import FullyAsyncvLLMReplica
self.rollout_replica_class = FullyAsyncvLLMReplica
print("[FullyAsyncAgentLoopManager] vLLM replica class selected")
else:
raise ValueError(f"Unsupported rollout name: {rollout_name}. Supported values are 'sglang' and 'vllm'.")
self.rollout_replicas = None
self.server_handles = None
self.server_addresses = None
self.agent_loop_workers = None
@classmethod
async def create(
cls,
config: DictConfig,
worker_group: RayWorkerGroup = None,
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
instance = cls(config, worker_group, reward_loop_worker_handles)
await instance._async_init()
return instance
async def _async_init(self):
await self._initialize_llm_servers_async()
self._init_agent_loop_workers()
async def _initialize_llm_servers_async(self):
rollout_world_size = (
self.config.actor_rollout_ref.rollout.tensor_model_parallel_size
* self.config.actor_rollout_ref.rollout.data_parallel_size
* self.config.actor_rollout_ref.rollout.pipeline_model_parallel_size
)
world_size = (
self.worker_group.world_size
if self.worker_group
else self.config.trainer.n_gpus_per_node * self.config.trainer.nnodes
)
num_replicas = world_size // rollout_world_size
rollout_config = self.config.actor_rollout_ref.rollout
model_config = self.config.actor_rollout_ref.model
self.rollout_replicas = [
self.rollout_replica_class(
replica_rank=replica_rank,
config=rollout_config,
model_config=model_config,
gpus_per_node=self.config.trainer.n_gpus_per_node,
)
for replica_rank in range(num_replicas)
]
if self.worker_group:
await asyncio.gather(*[server.init_hybrid(self.worker_group) for server in self.rollout_replicas])
else:
await asyncio.gather(*[server.init_standalone() for server in self.rollout_replicas])
self.server_handles = [server._server_handle for server in self.rollout_replicas]
self.server_addresses = [server._server_address for server in self.rollout_replicas]
print(f"AgentLoopManager: {self.server_addresses}")
# Update Prometheus configuration with server addresses
if rollout_config.prometheus.enable:
if rollout_config.disable_log_stats:
raise ValueError("PROMETHEUS needs disable_log_stats==False, but it is currently True.")
await asyncio.to_thread(
update_prometheus_config, rollout_config.prometheus, self.server_addresses, rollout_config.name
)
async def generate_single_sample_async(
self,
sample: DataProto,
partial_output_list: Optional[list[AgentLoopOutput]],
) -> tuple[list[AgentLoopOutput], bool] | tuple[DataProto, bool]:
"""
        Asynchronously process a single sample.
        Args:
            sample: Single-sample DataProto.
            partial_output_list (Optional[list[AgentLoopOutput]]): Partially rolled-out results from a
                previous, cancelled generation, if any.
        Returns:
            A tuple of (outputs, is_cancel), as returned by generate_sequences_no_post.
"""
worker = self._select_best_worker()
output_future = worker.generate_sequences_no_post.remote(sample, partial_output_list)
return await asyncio.wrap_future(output_future.future())
def _select_best_worker(self):
"""Select the best worker, simple round-robin load balancing"""
if not hasattr(self, "_worker_index"):
self._worker_index = 0
worker = self.agent_loop_workers[self._worker_index]
self._worker_index = (self._worker_index + 1) % len(self.agent_loop_workers)
return worker
async def cancel(self):
worker_cancel_tasks = [worker.cancel_agent_loops.remote() for worker in self.agent_loop_workers]
rollout_cancel_tasks = [replica.cancel() for replica in self.rollout_replicas]
await asyncio.gather(*rollout_cancel_tasks, *worker_cancel_tasks)
async def resume(self):
rollout_resume_tasks = [replica.resume() for replica in self.rollout_replicas]
worker_resume_tasks = [worker.resume_agent_loops.remote() for worker in self.agent_loop_workers]
await asyncio.gather(*rollout_resume_tasks, *worker_resume_tasks)
async def wake_up(self):
await asyncio.gather(*[replica.wake_up() for replica in self.rollout_replicas])
async def sleep(self):
await asyncio.gather(*[replica.sleep() for replica in self.rollout_replicas])
async def clear_kv_cache(self):
await asyncio.gather(*[replica.clear_kv_cache() for replica in self.rollout_replicas])
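# Hedged usage sketch (illustrative, not part of the original file). `config` and `sample`
# (a single-sample DataProto already repeated rollout.n times) are assumed to be prepared by the caller.
async def _example_fully_async_rollout(config, sample: DataProto):
    manager = await FullyAsyncAgentLoopManager.create(config)
    outputs, is_cancel = await manager.generate_single_sample_async(sample, partial_output_list=None)
    if is_cancel:
        # A cancelled rollout returns raw AgentLoopOutput objects; after the caller has synced weights
        # it can resume and feed them back in to continue generation from where it stopped.
        await manager.resume()
        outputs, is_cancel = await manager.generate_single_sample_async(sample, outputs)
    return outputs, is_cancel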
|
verl__experimental__fully_async_policy__agent_loop__agent_loop.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.experimental.agent_loop import AgentLoopBase
from verl.experimental.agent_loop.agent_loop import AgentLoopOutput, register
from verl.utils.profiler import simple_timer
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@register("partial_single_turn_agent")
class PartialSingleTurnAgentLoop(AgentLoopBase):
"""Naive agent loop that only do single turn chat completion."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.prompt_length = self.config.actor_rollout_ref.rollout.prompt_length
self.response_length = self.config.actor_rollout_ref.rollout.response_length
self.apply_chat_template_kwargs = self.config.data.get("apply_chat_template_kwargs", {})
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
output: Optional[AgentLoopOutput] = kwargs.get("output", None)
messages = list(kwargs["raw_prompt"])
multi_modal_data = await self.process_vision_info(messages)
images = multi_modal_data.get("images")
videos = multi_modal_data.get("videos")
param_version = kwargs.get("param_version", 0)
metrics = {}
request_id = uuid4().hex
param_version_start = param_version
param_version_end = param_version
if not output:
            # TODO(baiyan): this is supposed to use the correct processor,
            # but async training was observed to hang when use_correct_processor=True,
            # so we use the tokenizer to tokenize the prompt for now.
use_correct_processor = False
if self.processor is not None and use_correct_processor:
def get_prompt_ids():
raw_prompt = self.processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False,
**self.apply_chat_template_kwargs,
)
model_inputs = self.processor(text=[raw_prompt], images=images, videos=videos, return_tensors="pt")
return model_inputs.pop("input_ids").squeeze(0).tolist()
prompt_ids = await self.loop.run_in_executor(None, get_prompt_ids)
# Refer to the implementation of the run function in verl/experimental/agent_loop/single_turn_agent_loop.py
elif self.processor is not None:
prompt_ids = await self.apply_chat_template(
messages,
images=images,
videos=videos,
)
else:
prompt_ids = await self.loop.run_in_executor(
None,
lambda: self.tokenizer.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, **self.apply_chat_template_kwargs
),
)
else:
if output.extra_fields.get("is_cancel", False):
                # Resume the paused sample: append the partial result directly after prompt_ids
                # and carry over the generate_sequences metric
prompt_ids = output.prompt_ids + output.response_ids
metrics["generate_sequences"] = output.metrics.generate_sequences
param_version_start = output.extra_fields.get("param_version_start", param_version)
else:
# In the same batch of samples,
# some are canceled and some are not.
# The samples without partial rollout are returned directly.
return output
with simple_timer("generate_sequences", metrics):
response_ids, response_logprobs, is_cancel = await self.server_manager.generate_for_partial(
request_id=request_id,
prompt_ids=prompt_ids,
sampling_params=sampling_params,
image_data=images,
video_data=videos,
)
if not output:
response_mask = [1] * len(response_ids)
else:
            # Resumed sample: restore the original prompt, prepend the previously generated
            # partial response to the new response, and rebuild response_mask accordingly
prompt_ids = output.prompt_ids
response_logprobs = output.response_logprobs + response_logprobs
response_ids = output.response_ids + response_ids
response_mask = [1] * len(response_ids)
if len(response_ids) >= self.response_length:
is_cancel = False
return AgentLoopOutput(
prompt_ids=prompt_ids,
response_ids=response_ids[: self.response_length],
response_mask=response_mask[: self.response_length],
response_logprobs=response_logprobs[: self.response_length],
num_turns=2,
metrics=metrics,
extra_fields={
"is_cancel": is_cancel,
"param_version_start": param_version_start,
"param_version_end": param_version_end,
"turn_scores": [],
"tool_rewards": [],
},
multi_modal_data=multi_modal_data,
# multi_modal_data={"image": image_data} if image_data is not None else {},
)
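# Hedged illustration (not part of the original file) of how a cancelled single-turn rollout is
# resumed above: the partial response is appended to the prompt for the next generate call, and the
# final response is the concatenation of the partial and newly generated segments.
def _example_resume_concat(prompt_ids, partial_response, new_response, response_length):
    resumed_prompt = prompt_ids + partial_response                     # what is re-sent to the server
    final_response = (partial_response + new_response)[:response_length]
    return resumed_prompt, final_response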
|
verl__experimental__fully_async_policy__agent_loop__partial_single_turn_agent_loop.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.experimental.agent_loop.agent_loop import AgentLoopOutput, register
from verl.experimental.agent_loop.tool_agent_loop import AgentData, AgentState, ToolAgentLoop
from verl.utils.profiler import simple_timer
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@register("async_partial_tool_agent")
class AsyncPartialToolAgentLoop(ToolAgentLoop):
"""
    Tool agent loop with support for partial rollout across multiple tool invocations.
"""
def __init__(self, trainer_config, **kwargs):
super().__init__(trainer_config, **kwargs)
self.enable_partial_rollout = trainer_config.config.async_training.get("partial_rollout", False)
async def run(
self, sampling_params: dict[str, Any], *, cancellation_event: asyncio.Event = None, **kwargs
) -> AgentLoopOutput:
"""
        Main entry point; supports interruption and recovery.
        Args:
            sampling_params: Sampling parameters.
            cancellation_event: Cancellation signal shared by all agent loops on this worker.
            **kwargs: Contains output (for recovery), raw_prompt, param_version, etc.
        Returns:
            AgentLoopOutput: Includes the is_cancel flag.
"""
param_version = kwargs.get("param_version", 0)
agent_data = None
state = None
        # 1. Check whether this is a resumed (partial) task
output: Optional[AgentLoopOutput] = kwargs.get("output", None)
if output and output.extra_fields.get("is_cancel", False):
agent_data, state = self._restore_from_output(output)
logger.info(f"[PartialToolAgent] Resuming from {state.value}")
else:
if output and not output.extra_fields.get("is_cancel", False):
# Completed, return directly
return output
agent_data = await self._init_agent_data(kwargs, param_version)
state = AgentState.PENDING
logger.info("[PartialToolAgent] Start from scratch")
# 2. run state machine
state = await self._run_state_machine(agent_data, state, sampling_params, cancellation_event)
        # 3. Build output
if state == AgentState.TERMINATED:
return self._build_completed_output(agent_data, param_version)
else:
# build cancelled output
return self._build_cancelled_output(agent_data, state)
async def _init_agent_data(self, kwargs: dict, param_version: int) -> AgentData:
messages = list(kwargs["raw_prompt"])
multi_modal_data = await self.process_vision_info(messages)
image_data = multi_modal_data.get("images")
video_data = multi_modal_data.get("videos")
metrics = {}
request_id = uuid4().hex
tools_kwargs = kwargs.get("tools_kwargs", {})
# Initialize interaction if needed
interaction = None
interaction_kwargs = {}
if self.interaction_config_file:
interaction_kwargs = kwargs["extra_info"]["interaction_kwargs"]
if "name" not in interaction_kwargs:
raise ValueError("'name' key is required in interaction_kwargs")
interaction_name = interaction_kwargs["name"]
if interaction_name not in self.interaction_map:
raise ValueError(
f"Interaction '{interaction_name}' not found in interaction_map. Available interactions: "
f"{list(self.interaction_map.keys())}"
)
interaction = self.interaction_map[interaction_name]
await interaction.start_interaction(request_id, **interaction_kwargs)
# Create AgentData instance to encapsulate all state
agent_data = AgentData(
messages=messages,
image_data=image_data,
video_data=video_data,
metrics=metrics,
request_id=request_id,
tools_kwargs=tools_kwargs,
interaction=interaction,
interaction_kwargs=interaction_kwargs,
)
# additional param version record
agent_data.extra_fields["param_version_start"] = param_version
agent_data.extra_fields["param_version_end"] = param_version
return agent_data
def _restore_from_output(self, output: AgentLoopOutput) -> tuple[AgentData, AgentState]:
"""restore AgentState and AgentData from output"""
agent_data = output.extra_fields.get("agent_data", None)
agent_state = output.extra_fields.get("agent_state", None)
if agent_data is None or agent_state is None:
raise ValueError(f"Unexpected situation: agent_data is {agent_data}, agent_state is {agent_state}")
return agent_data, agent_state
async def _run_state_machine(
self,
agent_data: AgentData,
state: AgentState,
sampling_params: dict[str, Any],
cancellation_event: asyncio.Event = None,
) -> AgentState:
"""
        State machine loop.
        Currently, an interruption is only acted on inside the GENERATING state or between states,
        i.e. after the previous state has finished.
"""
# State machine loop
while state != AgentState.TERMINATED:
if cancellation_event and cancellation_event.is_set():
logger.info(f"[PartialToolAgent] Cancellation detected. Interrupted before/at state: {state.value}")
return state
if state == AgentState.PENDING:
state = await self._handle_pending_state(agent_data, sampling_params)
elif state == AgentState.GENERATING:
state = await self._handle_generating_state_partial(agent_data, sampling_params)
elif state == AgentState.PROCESSING_TOOLS:
state = await self._handle_processing_tools_state(agent_data)
elif state == AgentState.INTERACTING:
state = await self._handle_interacting_state(agent_data)
else:
logger.error(f"[PartialToolAgent] Invalid state: {state}")
return AgentState.TERMINATED
return AgentState.TERMINATED
async def _handle_generating_state_partial(
self, agent_data: AgentData, sampling_params: dict[str, Any], ignore_termination: bool = False
) -> AgentState:
"""
Handle GENERATING state, support partial rollout
"""
add_messages: list[dict[str, Any]] = []
with simple_timer("generate_sequences", agent_data.metrics):
# partial interface
if self.enable_partial_rollout:
response_ids, log_probs, is_cancel = await self.server_manager.generate_for_partial(
request_id=agent_data.request_id,
prompt_ids=agent_data.prompt_ids,
sampling_params=sampling_params,
image_data=agent_data.image_data,
video_data=agent_data.video_data,
)
if is_cancel:
# Save the generated parts
agent_data.response_ids = response_ids
agent_data.prompt_ids += agent_data.response_ids
agent_data.response_mask += [1] * len(response_ids)
if log_probs:
agent_data.response_logprobs += log_probs
if not ignore_termination and len(agent_data.response_mask) >= self.response_length:
# If response_length has reached the limit,
# it is considered to have ended normally.
agent_data.assistant_turns += 1
return AgentState.TERMINATED
return AgentState.GENERATING
else:
# original generate interface
output = await self.server_manager.generate(
request_id=agent_data.request_id,
prompt_ids=agent_data.prompt_ids,
sampling_params=sampling_params,
image_data=agent_data.image_data,
video_data=agent_data.video_data,
)
response_ids = output.token_ids
log_probs = output.log_probs
agent_data.assistant_turns += 1
agent_data.response_ids = response_ids
agent_data.prompt_ids += agent_data.response_ids
agent_data.response_mask += [1] * len(agent_data.response_ids)
if log_probs:
agent_data.response_logprobs += log_probs
if not ignore_termination and len(agent_data.response_mask) >= self.response_length:
return AgentState.TERMINATED
if self.max_assistant_turns and agent_data.assistant_turns >= self.max_assistant_turns:
return AgentState.TERMINATED
if self.max_user_turns and agent_data.user_turns >= self.max_user_turns:
return AgentState.TERMINATED
# Extract tool calls
_, agent_data.tool_calls = await self.tool_parser.extract_tool_calls(agent_data.response_ids)
# Handle interaction if needed
if self.interaction_config_file:
assistant_message = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(agent_data.response_ids, skip_special_tokens=True)
)
add_messages.append({"role": "assistant", "content": assistant_message})
agent_data.messages.extend(add_messages)
# Determine next state
if agent_data.tool_calls:
return AgentState.PROCESSING_TOOLS
elif self.interaction_config_file:
return AgentState.INTERACTING
else:
return AgentState.TERMINATED
def _build_completed_output(self, agent_data: AgentData, param_version: int) -> AgentLoopOutput:
"""build completed output"""
response_ids = agent_data.prompt_ids[-len(agent_data.response_mask) :]
prompt_ids = agent_data.prompt_ids[: len(agent_data.prompt_ids) - len(agent_data.response_mask)]
multi_modal_data = {}
if agent_data.image_data is not None:
multi_modal_data["image"] = agent_data.image_data
if agent_data.video_data is not None:
multi_modal_data["video"] = agent_data.video_data
output = AgentLoopOutput(
prompt_ids=prompt_ids,
response_ids=response_ids[: self.response_length],
response_mask=agent_data.response_mask[: self.response_length],
multi_modal_data=multi_modal_data,
response_logprobs=agent_data.response_logprobs[: self.response_length]
if agent_data.response_logprobs
else None,
num_turns=agent_data.user_turns + agent_data.assistant_turns + 1,
metrics=agent_data.metrics,
extra_fields={},
)
output.extra_fields.update(
{
"turn_scores": agent_data.turn_scores,
"tool_rewards": agent_data.tool_rewards,
"is_cancel": False,
"param_version_start": agent_data.extra_fields["param_version_start"],
"param_version_end": param_version,
}
)
return output
def _build_cancelled_output(self, agent_data: AgentData, state: AgentState) -> AgentLoopOutput:
"""build cancelled output"""
return AgentLoopOutput(
prompt_ids=[],
response_ids=[],
response_mask=[],
multi_modal_data={},
response_logprobs=None,
num_turns=0,
metrics=agent_data.metrics,
extra_fields={
"is_cancel": True,
"agent_data": agent_data,
"agent_state": state,
},
)
|
verl__experimental__fully_async_policy__agent_loop__partial_tool_agent_loop.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import threading
import torch
from omegaconf import DictConfig
from ray.util.collective import collective
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import get_torch_device, is_npu_available
from verl.utils.distributed import stateless_init_process_group
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class BaseDetachNcclSync:
_bucket_size_mb = 1024.0
_sync_history = []
_max_history_size = 20
_last_avg_bucket_size = 1024.0
def __init__(self, config: DictConfig, role: str):
self._bg_loop = asyncio.new_event_loop()
self._bg_thread = threading.Thread(
target=self._start_background_loop, args=(self._bg_loop,), name="rollout_actor_async_worker", daemon=True
)
self._bg_thread.start()
logger.info(f"[DetachNcclSync] Background thread for SGLang sync started. PID: {os.getpid()}")
@classmethod
def get_bucket_size_mb(cls):
return cls._bucket_size_mb
@classmethod
def get_last_avg_bucket_size(cls):
return cls._last_avg_bucket_size
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=True)
def get_last_avg_bucket_size_remote(self):
return BaseDetachNcclSync._last_avg_bucket_size
@classmethod
def record_sync_metrics(cls, bucket_size_mb, sync_time):
"""Dynamically adjust the bucket size based on past synchronization times."""
bucket_size_mb_value = bucket_size_mb[0] if isinstance(bucket_size_mb, list) else bucket_size_mb
print(f"[DetachNcclSync] sync_metrics: bucket_size_mb={bucket_size_mb_value:.2f}MB, sync_time={sync_time:.2f}s")
cls._sync_history.append((bucket_size_mb_value, sync_time))
if len(cls._sync_history) > cls._max_history_size:
cls._sync_history.pop(0)
MIN_BUCKET_SIZE_MB = 512
MAX_BUCKET_SIZE_MB = 8192 # 8GB
if len(cls._sync_history) < 4:
cls._bucket_size_mb = min(MAX_BUCKET_SIZE_MB, cls._bucket_size_mb * 1.5)
else:
times = [t for _, t in cls._sync_history]
buckets = [b for b, _ in cls._sync_history]
recent_avg_time = sum(times[-2:]) / 2
previous_avg_time = sum(times[-4:-2]) / 2
recent_avg_bucket = sum(buckets[-2:]) / 2
previous_avg_bucket = sum(buckets[-4:-2]) / 2
performance_improved = recent_avg_time < previous_avg_time
bucket_increased = recent_avg_bucket > previous_avg_bucket
time_change_ratio = (
abs(recent_avg_time - previous_avg_time) / previous_avg_time if previous_avg_time > 0 else 0.0
)
if time_change_ratio > 0.2:
increase_step, decrease_step = 1.2, 0.8
elif time_change_ratio > 0.1:
increase_step, decrease_step = 1.1, 0.9
elif time_change_ratio > 0.05:
increase_step, decrease_step = 1.05, 0.95
else:
increase_step, decrease_step = 1.02, 0.98
should_increase = (performance_improved and bucket_increased) or (
not performance_improved and not bucket_increased
)
step = increase_step if should_increase else decrease_step
new_size = cls._bucket_size_mb * step
cls._bucket_size_mb = min(MAX_BUCKET_SIZE_MB, max(MIN_BUCKET_SIZE_MB, new_size))
def _start_background_loop(self, loop):
asyncio.set_event_loop(loop)
try:
loop.run_forever()
except Exception as e:
logger.error(f"[DetachNcclSync] Background loop crashed: {e}")
def _run_async_safely(self, coro):
if not self._bg_thread.is_alive():
raise RuntimeError("Background thread for SGLang sync is not running!")
future = asyncio.run_coroutine_threadsafe(coro, self._bg_loop)
return future.result()
def __del__(self):
if hasattr(self, "_bg_loop") and self._bg_loop.is_running():
self._bg_loop.call_soon_threadsafe(self._bg_loop.stop)
if hasattr(self, "_bg_thread") and self._bg_thread.is_alive():
self._bg_thread.join(timeout=1.0)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def init_checkpoint_engine(self, rank_offset: int, actor_num: int, rollout_num: int):
from .checkpoint_engine import CheckpointEngine
current_rank = torch.distributed.get_rank() + rank_offset
actor_ranks = list(range(actor_num))
rollout_ranks = [rank + actor_num for rank in range(rollout_num)]
assert rank_offset == 0 or rank_offset == actor_num
self.checkpoint_engine = CheckpointEngine(
current_rank, actor_ranks, rollout_ranks, self.config.checkpoint_engine.device_buffer_size_M
)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def create_weight_sync_group(self, master_address, master_port, rank_offset, world_size):
rank = torch.distributed.get_rank() + rank_offset
self._weight_sync_group = stateless_init_process_group(
master_address,
master_port,
rank,
world_size,
get_torch_device().current_device(),
)
@staticmethod
def get_inference_model(rollout):
"""
        Get the underlying model from different types of inference_engine.
        Args:
            rollout: rollout object
        Returns:
            model: the model object extracted from the rollout's inference engine
                (vLLM LLM via llm_engine, or WorkerWrapperBase via worker)
"""
inference_engine = rollout.inference_engine
if hasattr(inference_engine, "llm_engine"):
inference_model = inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model
elif hasattr(inference_engine, "worker"):
inference_model = inference_engine.worker.model_runner.model
else:
raise AttributeError(
f"Unsupported inference_engine type: {type(inference_engine)}. "
f"Expected LLM (with llm_engine attribute) or WorkerWrapperBase (with worker attribute)."
)
return inference_model
def _sync_sglang_weights(self, inference_model, params, sync_group_name):
bucket_size_bytes = int(self.get_bucket_size_mb() * 1024 * 1024)
actual_bucket_sizes = []
current_batch = []
current_batch_size = 0
def flush_batch():
if current_batch:
actual_bucket_sizes.append(current_batch_size / (1024 * 1024))
self._run_async_safely(self.update_weights(inference_model, iter(current_batch)))
get_torch_device().synchronize()
current_batch.clear()
for key, shape, dtype in self._weights_info:
tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
if self._is_actor:
assert key in params
origin_data = params[key]
if hasattr(origin_data, "full_tensor"):
origin_data = origin_data.full_tensor()
if torch.distributed.get_rank() == 0:
tensor.copy_(origin_data)
collective.broadcast(tensor, src_rank=0, group_name=sync_group_name)
tensor_size = tensor.numel() * tensor.element_size()
current_batch.append((key, tensor))
current_batch_size += tensor_size
if current_batch_size >= bucket_size_bytes:
flush_batch()
current_batch_size = 0
flush_batch()
cls = type(self)
cls._last_avg_bucket_size = (
sum(actual_bucket_sizes) / len(actual_bucket_sizes) if actual_bucket_sizes else self.get_bucket_size_mb()
)
# Resume kv_cache after weights sync to restore GPU memory released during pause
if self._is_rollout and self.rollout_device_mesh["infer_tp"].get_local_rank() == 0:
self._run_async_safely(inference_model.resume_memory_occupation(tags=["kv_cache"]))
def _sync_vllm_weights(self, inference_model, params, sync_group_name):
for key, shape, dtype in self._weights_info:
tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
if self._is_actor:
assert key in params
origin_data = params[key]
if hasattr(origin_data, "full_tensor"):
origin_data = origin_data.full_tensor()
if torch.distributed.get_rank() == 0:
tensor.copy_(origin_data)
if is_npu_available:
self._weight_sync_group.broadcast(tensor, src=0, stream=get_torch_device().current_stream())
else:
collective.broadcast(tensor, src_rank=0, group_name=sync_group_name)
if self._is_rollout:
inference_model.load_weights([(key, tensor)])
async def update_weights(self, inference_engine, params):
from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights
await sgl_update_weights(
engine=inference_engine,
params_batch=params,
device_mesh_key="infer_tp",
device_mesh=self.rollout_device_mesh,
)
if self.rollout_device_mesh["infer_tp"].get_local_rank() == 0:
await inference_engine.flush_cache()
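# Hedged illustration (not part of the original file) of the adaptive bucket sizing implemented in
# BaseDetachNcclSync.record_sync_metrics: for the first few syncs the bucket grows aggressively
# (x1.5, capped at 8 GiB); afterwards it is nudged up or down depending on whether recent sync times
# improved together with a bucket-size change. Rough dry run assuming the class defaults above and
# that the caller reports the bucket size actually used for each sync:
#
#   record_sync_metrics(1024.0, 12.0)   # fewer than 4 samples -> bucket grows to 1536 MB
#   record_sync_metrics(1536.0, 10.0)   # fewer than 4 samples -> 2304 MB
#   record_sync_metrics(2304.0,  9.0)   # fewer than 4 samples -> 3456 MB
#   record_sync_metrics(3456.0,  8.5)   # 4 samples: time improved as bucket grew -> ~4147 MB (x1.2)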
|
verl__experimental__fully_async_policy__base_detach_sync.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This logic is largely copied from:
- https://github.com/MoonshotAI/checkpoint-engine
"""
import concurrent.futures
import os
import re
import socket
import subprocess
import threading
from collections.abc import Callable
from functools import lru_cache
from typing import TYPE_CHECKING, Annotated, Any, TypedDict
import torch
import zmq
from pydantic import BaseModel, PlainSerializer, PlainValidator, WithJsonSchema
from ray.util.collective import collective
from verl.utils.device import (
get_device_name,
get_torch_device,
)
if TYPE_CHECKING:
from typing import TypeVar
from typing_extensions import TypedDict
class FileMeta(TypedDict):
key: str # parameter name
dtype: torch.dtype
shape: torch.Size
type: type
tp_concat_dim: int
T = TypeVar("T")
def _dt_validate(value: Any) -> torch.dtype:
"""Validate the input value to ensure it is a valid torch.dtype"""
if isinstance(value, str):
if not value.startswith("torch."):
raise ValueError(f"dtype {value} should start with torch.")
try:
value = getattr(torch, value.split(".")[1])
except AttributeError as e:
raise ValueError(f"unknown dtype: {value}") from e
if not isinstance(value, torch.dtype):
raise TypeError(f"dtype {value} should be torch.dtype, got {type(value)}")
return value
# Annotated type for torch.dtype with validation and serialization
_TorchDtype = Annotated[
torch.dtype,
PlainValidator(_dt_validate),
PlainSerializer(lambda x: str(x), return_type=str),
WithJsonSchema({"type": "string"}, mode="serialization"),
]
def _size_validate(value: Any) -> torch.Size:
"""Validate the input value to ensure it is a valid torch.Size"""
if isinstance(value, list | tuple):
return torch.Size(value)
if not isinstance(value, torch.Size):
raise TypeError(f"size {value} should be torch.Size, got {type(value)}")
return value
# Annotated type for torch.Size with validation and serialization
_TorchSize = Annotated[
torch.Size,
PlainValidator(_size_validate),
PlainSerializer(lambda x: tuple(x), return_type=tuple),
WithJsonSchema({"type": "array", "items": {"type": "integer"}}, mode="serialization"),
]
def _tensor_validate(value: Any) -> torch.Tensor:
"""Validate the input value to ensure it is a valid torch.Tensor"""
if isinstance(value, torch.Tensor):
return value
raise TypeError(f"tensor {value} should be torch.Tensor, got {type(value)}")
# Annotated type for torch.Tensor with validation
_TorchTensor = Annotated[
torch.Tensor,
PlainValidator(_tensor_validate),
]
class ParameterMeta(BaseModel):
"""Metadata for a parameter including name, dtype, and shape"""
name: str
dtype: _TorchDtype
shape: _TorchSize
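# Hedged example (not part of the original file), assuming pydantic v2 semantics for the annotated
# types above: ParameterMeta should round-trip through plain Python / JSON, roughly
#   ParameterMeta(name="w", dtype="torch.float16", shape=[4, 4]).model_dump()
#   -> {"name": "w", "dtype": "torch.float16", "shape": (4, 4)}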
class MemoryBuffer(BaseModel):
"""
MemoryBuffer assembles a group of parameter tensors into a single buffer,
and records the meta information of each original parameter.
"""
buffer: _TorchTensor
size: int # size of buffer in bytes
metas: list[ParameterMeta]
class MemoryBufferMeta(BaseModel):
"""The meta info of MemoryBuffer, but not store the buffer data"""
size: int
metas: list[ParameterMeta]
# 256 bytes alignment when flatten torch tensors to uint8 buffer
_ALIGN_SIZE = 256
def _align_size(dtype: torch.dtype, shape: torch.Size) -> int:
"""
Calculate the aligned size of a torch tensor
If the tensor's size (in bytes) cannot be evenly divided by _ALIGN_SIZE,
it will be rounded up to the nearest multiple of _ALIGN_SIZE.
Args:
dtype (torch.dtype): The data type of the tensor (e.g., torch.float32, torch.int64).
shape (torch.Size): The shape of the tensor, representing its dimensions.
Returns:
int: The aligned size of the tensor in bytes.
"""
return (dtype.itemsize * shape.numel() + _ALIGN_SIZE - 1) // _ALIGN_SIZE * _ALIGN_SIZE
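# Hedged worked example (not part of the original file): a (1000, 1000) float16 tensor occupies
# 2_000_000 bytes, which _align_size rounds up to the next multiple of 256, i.e. 2_000_128 bytes.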
@lru_cache(maxsize=1)
def get_ip() -> str:
try:
# try to get ip from network interface
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e: # noqa: BLE001
# fallback to get ip from hostname
print(f"fail to get ip from network interface, fallback to get ip from hostname: {e}")
return socket.gethostbyname(socket.gethostname())
def npu_generate_uuid() -> str:
"""Generate uuid for each npu device"""
str_pid = str(os.getpid())
npu_num = 8
try:
for npu_id in range(npu_num):
cmd = ["npu-smi", "info", "-t", "proc-mem", "-i", str(npu_id)]
result = subprocess.run(cmd, check=True, capture_output=True, text=True) # noqa: S603
str_result = str(result.stdout)
if str_pid in str_result:
# In A3 server, one NPU has two chips.
match_chip_count = re.search(r"Chip Count[^\d]*(\d+)", str_result)
chip_count = int(match_chip_count.group(1))
search_after_pid = str_result[str_result.find(str_pid) + len(str_pid) :]
match_chip_id = re.search(r"Chip ID[^\d]*(\d+)", search_after_pid)
chip_id = int(match_chip_id.group(1))
return f"{get_ip()}-{npu_id * chip_count + chip_id}"
raise ValueError("The current process is not running on the npu device")
except subprocess.CalledProcessError as e:
raise ValueError("The current process is not running on the npu device") from e
def _get_physical_device_id(device_index: int | None = None) -> str:
"""
Get the physical device (GPU or NPU) uuid of the current device
"""
try:
if get_device_name() == "npu":
return f"NPU-{npu_generate_uuid()}"
else:
return f"GPU-{get_torch_device().get_device_properties(device_index).uuid!s}"
except AssertionError as e:
raise ValueError(f"fail to get physical gpu id {device_index}") from e
class FlattenedTensorMetadata(TypedDict):
name: str
shape: torch.Size
dtype: torch.dtype
# specify the start offset of this tensor in shared ipc_buffer tensor
offset: int
def _to_flattened_tensor_meta(metas: list[ParameterMeta], offset: int = 0) -> list[FlattenedTensorMetadata]:
"""
    Compute the offset of each parameter in the flattened buffer.
    Args:
        metas (list[ParameterMeta]): The list of parameter meta info.
        offset (int): The start offset within the buffer. Defaults to 0.
    Returns:
        list[FlattenedTensorMetadata]: One entry per parameter, carrying its offset in the buffer.
"""
ret = []
for meta in metas:
size = _align_size(meta.dtype, meta.shape)
ret.append(
{
"name": meta.name,
"dtype": meta.dtype,
"shape": meta.shape,
"offset": offset,
}
)
offset += size
return ret
def _extract_weights(
flatten_metas: list[FlattenedTensorMetadata], buffer: torch.Tensor
) -> list[tuple[str, torch.Tensor]]:
"""
    Extract the named weight tensors from the flattened buffer according to flatten_metas.
"""
assert buffer is not None
weights: list[tuple[str, torch.Tensor]] = []
for item in flatten_metas:
shape = item["shape"]
if isinstance(shape, list | tuple):
shape = torch.Size(shape)
assert isinstance(shape, torch.Size)
dtype, offset = item["dtype"], item["offset"]
size = dtype.itemsize * shape.numel()
tensor = buffer[offset : offset + size].view(dtype=dtype).view(shape)
weights.append((item["name"], tensor))
return weights
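# Hedged worked example (not part of the original file): two float32 parameters of shape (4,) each
# occupy 16 bytes but are aligned to 256 bytes, so _to_flattened_tensor_meta assigns offsets 0 and
# 256, and _extract_weights views the corresponding 16-byte slices of the uint8 buffer back as
# float32 tensors of shape (4,).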
class CheckpointEngine:
"""
    CheckpointEngine class for controlling parameter synchronization.
Each trainer/rollout rank has a CheckpointEngine instance.
"""
def __init__(
self, current_rank: int, actor_ranks: list[int], rollout_ranks: list[int], device_buffer_size_M: int
) -> None:
self.current_rank = current_rank
self.actor_ranks = actor_ranks
self.rollout_ranks = rollout_ranks
# global_buckets saves the global MemoryBufferMeta infos.
# Thus each CheckpointEngine instance can control their operations in SPMD
self.global_buckets: dict[int, list[MemoryBufferMeta]] = None
# min device_buffer_size for h2d and broadcast
self.device_buffer_size_M = device_buffer_size_M
# ipc config for broadcast in pipeline mode
self._zmq_ctx = zmq.Context()
self._zmq_addr_counter: int = 0
device_index = self.current_rank % get_torch_device().device_count()
self._device_uuid = _get_physical_device_id(device_index)
def register_checkpoint(
self, weights_info: list[tuple[str, torch.Size, torch.dtype]], cpu_named_params: dict[str, torch.Tensor]
):
"""
Register checkpoint information and prepare memory buffers for parameter synchronization.
This function organizes the parameters into memory buckets for efficient synchronization
and prepares pinned memory buffers for faster data transfer between CPU and device.
Args:
weights_info (list[tuple[str, torch.Size, torch.dtype]]):
A list of tuples containing parameter name, shape, and data type.
cpu_named_params (dict[str, torch.Tensor]):
A dictionary mapping parameter names to their corresponding CPU tensors.
Steps:
1. Calculate the bucket size based on the largest parameter tensor size and the device buffer size.
2. Organize parameters into global buckets for each actor rank, ensuring that the total size of each bucket
does not exceed the bucket size.
3. For actor ranks, allocate pinned memory buffers for each bucket and copy the parameter tensors
into these buffers.
Notes:
            Each CheckpointEngine instance maintains the global bucket metas,
            but only stores its own shard of the params data in host memory.
"""
bucket_size = max(
self.device_buffer_size_M << 20, max(_align_size(dtype, shape) for _, shape, dtype in weights_info)
)
print(
f"set checkpoint_engine device buffer size: {self.device_buffer_size_M}M, "
f"and finally set it to {bucket_size >> 20}M considering the largest parameter tensor size"
)
self.bucket_size = bucket_size
# global_buckets saves the global MemoryBufferMeta infos.
if self.global_buckets is None:
self.global_buckets = {rank: [MemoryBufferMeta(size=0, metas=[])] for rank in self.actor_ranks}
actor_ranks_size = len(self.actor_ranks)
assert actor_ranks_size > 0, f"actor_ranks:{self.actor_ranks} should not be empty"
for param_idx, (param_name, param_shape, param_dtype) in enumerate(weights_info):
# Each parameter is assigned to an actor rank, and only this rank will store it
            assign_rank = self.actor_ranks[param_idx % actor_ranks_size]
            param_size = _align_size(param_dtype, param_shape)
            if self.global_buckets[assign_rank][-1].size + param_size > bucket_size:
                assert self.global_buckets[assign_rank][-1].size, (
                    f"global_buckets[{assign_rank}][-1].size:{self.global_buckets[assign_rank][-1].size}"
                    " should not be 0"
                )
                self.global_buckets[assign_rank].append(MemoryBufferMeta(size=0, metas=[]))
            self.global_buckets[assign_rank][-1].metas.append(
                ParameterMeta(name=param_name, dtype=param_dtype, shape=param_shape)
            )
            self.global_buckets[assign_rank][-1].size += param_size
def register_pin_memory(idx: int, size: int) -> tuple[int, torch.Tensor]:
"""Allocate pinned memory for a bucket."""
buffer = torch.empty(size, dtype=torch.uint8, pin_memory=True)
return idx, buffer
def register_tensor(buffer: torch.Tensor, offset: int, tensor: torch.Tensor):
"""Copy a tensor into a pinned memory buffer."""
buffer[offset : offset + tensor.nbytes] = tensor.view(-1).view(dtype=torch.uint8)
memory_buffers = [] # for rollout rank, return empty buffer
if self.current_rank in self.actor_ranks: # is_actor
local_buckets = self.global_buckets[self.current_rank]
memory_buffers = [
MemoryBuffer(buffer=torch.empty(0), size=bucket.size, metas=bucket.metas) for bucket in local_buckets
]
            # Use a thread pool to accelerate copying parameters into the pinned buckets
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
futures = [
executor.submit(register_pin_memory, idx, bucket.size) for idx, bucket in enumerate(local_buckets)
]
new_futures = []
for future in concurrent.futures.as_completed(futures):
idx, buffer = future.result()
assert buffer.numel() == local_buckets[idx].size, (
f"buffer numel {buffer.numel()} should be equal to bucket size {local_buckets[idx].size}"
)
memory_buffers[idx].buffer = buffer
print(
f"[rank{self.current_rank}] register pin_memory for "
f" bucket {idx + 1}/{len(local_buckets)} finished, "
f"size {buffer.numel() / 1024 / 1024:.2f}MiB, start to copy tensors to buffer"
)
offset = 0
for meta in local_buckets[idx].metas:
name = meta.name
tensor = cpu_named_params[name]
size = _align_size(tensor.dtype, tensor.shape)
assert size == _align_size(meta.dtype, meta.shape), (
f"tensor {name} size {size} should be equal to "
f"meta size {_align_size(meta.dtype, meta.shape)}"
)
new_futures.append(executor.submit(register_tensor, buffer, offset, tensor))
offset += size
for future in concurrent.futures.as_completed(new_futures):
future.result()
self.memory_buffers = memory_buffers
def get_max_buckets_num_per_rank(self):
"""
        Get the maximum number of buckets across all ranks.
"""
assert self.global_buckets is not None
return max(len(buckets) for buckets in self.global_buckets.values())
def _bind_zmq_socket(self) -> tuple[zmq.Socket, list[tuple[str, str]]]:
"""
Bind zmq socket for broadcast.
"""
def zmq_handle(device_uuid: str) -> str:
return f"ipc://@checkpoint-engine-{device_uuid}-{self._zmq_addr_counter}.sock"
socket_path = zmq_handle(self._device_uuid)
socket = self._zmq_ctx.socket(zmq.REQ)
socket.bind(socket_path)
self._zmq_addr_counter += 1
return socket, socket_path
def update_checkpoint(self, inference_model, group_name: str, overlap_broadcast_and_consume: bool = False):
"""
Update the checkpoint by broadcasting and loading weights.
This function handles the synchronization of parameters across ranks by:
1. Copying data from memory buffers to device buffers (h2d_buffer).
2. Broadcasting the data to all ranks using collective communication.
3. Loading the weights into the inference model if provided.
4. Optionally, use a pipeline approach for broadcasting and loading weights.
Args:
inference_model: The model to load weights into. If None (trainer rank), weights are only broadcasted.
group_name (str): The name of the collective communication group.
overlap_broadcast_and_consume (bool): Whether to use the pipeline approach
for broadcasting and loading weights.
"""
try:
h2d_buffer: torch.Tensor | None = (
None
if self.current_rank in self.rollout_ranks
else torch.empty(self.bucket_size, dtype=torch.uint8, device=get_torch_device().current_device())
)
# for pipeline mode, we need to allocate 2x buffer size
broadcast_load_buffer = torch.empty(
self.bucket_size * (2 if overlap_broadcast_and_consume else 1),
dtype=torch.uint8,
device=get_torch_device().current_device(),
)
except Exception:
print(
"allocate buffer for update_checkpoint failed, "
"you may need to reduce "
"config.async_training.checkpoint_engine.device_buffer_size_M"
)
raise
max_h2d_iter = self.get_max_buckets_num_per_rank()
if overlap_broadcast_and_consume:
socket, socket_path = self._bind_zmq_socket()
# Define a function to update weights from IPC
def update_weights_from_ipc_(socket_path):
zmq_ctx = zmq.Context()
socket = zmq_ctx.socket(zmq.REP)
socket.connect(socket_path)
socket.recv_pyobj()
socket.send(b"")
while True:
payload: tuple[Callable, tuple] | list[FlattenedTensorMetadata] | None = socket.recv_pyobj()
if payload is None:
# means the update is done
get_torch_device().synchronize()
socket.send(b"")
break
assert isinstance(payload, list)
if inference_model is not None:
inference_model.load_weights(_extract_weights(payload, broadcast_load_buffer))
get_torch_device().synchronize()
socket.send(b"")
req_thread = threading.Thread(
target=update_weights_from_ipc_,
args=(socket_path,),
)
req_thread.start()
socket.send_pyobj(b"")
get_torch_device().synchronize()
gidx = 0
local_buckets = self.global_buckets.get(self.current_rank, [])
for i in range(max_h2d_iter):
            # Step 1: Each actor rank copies its parameter bucket into device memory
if i < len(self.memory_buffers):
h2d_buffer[: local_buckets[i].size].data.copy_(self.memory_buffers[i].buffer)
# Step 2: Broadcast the device data in turn
for broadcast_rank, _buckets in self.global_buckets.items():
if i >= len(_buckets):
continue
bucket = _buckets[i]
# Prepare the broadcast buffer
start = gidx % 2 * self.bucket_size if overlap_broadcast_and_consume else 0
buffer_b: torch.Tensor = broadcast_load_buffer[start : start + bucket.size]
if broadcast_rank == self.current_rank:
buffer_b.data.copy_(h2d_buffer[: bucket.size])
# Broadcast the buffer to all ranks
collective.broadcast(buffer_b, src_rank=broadcast_rank, group_name=group_name)
if overlap_broadcast_and_consume:
socket.recv()
collective.barrier(group_name=group_name)
socket.send_pyobj(_to_flattened_tensor_meta(bucket.metas, start))
elif inference_model is not None:
named_tensor = _to_flattened_tensor_meta(bucket.metas, 0)
inference_model.load_weights(_extract_weights(named_tensor, buffer_b))
gidx += 1
if overlap_broadcast_and_consume:
socket.recv()
socket.send_pyobj(None)
socket.recv()
req_thread.join()
socket.close()
collective.barrier(group_name=group_name)
# clear host memory cache
self.memory_buffers = []
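# Hedged usage sketch (illustrative, not part of the original file). Every participating rank builds
# the same global bucket layout, actor ranks pin their shard of the weights in host memory, and
# update_checkpoint then broadcasts bucket by bucket while rollout ranks load the weights:
#
#   engine = CheckpointEngine(current_rank, actor_ranks, rollout_ranks, device_buffer_size_M=1024)
#   engine.register_checkpoint(weights_info, cpu_named_params)   # rollout ranks keep empty buffers
#   engine.update_checkpoint(inference_model, group_name="weight_sync")  # group name is hypothetical;
#   # inference_model is None on actor (trainer) ranks, which only broadcast.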
|
verl__experimental__fully_async_policy__checkpoint_engine.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Optional
import numpy as np
import torch
from verl import DataProto
from verl.experimental.agent_loop.agent_loop import AgentLoopOutput
from verl.trainer.ppo.ray_trainer import compute_response_mask
@dataclass
class RolloutSample:
"""Enhanced rollout sample containing both original batch info and AgentLoopOutput"""
# Original batch information
full_batch: Any
# AgentLoopOutput from generation
agent_loop_output_list: list[AgentLoopOutput]
# Metadata
sample_id: str
epoch: int
# Processing metadata
processing_times: list[float]
tool_calls: list[float]
param_version: int
param_version_start: list[int]
param_version_end: list[int]
rollout_status: dict[str, Any]
@dataclass
class ValidateMetrics:
"""Metrics for validation"""
timing_raw: dict[str, Any]
metrics: Optional[dict[str, Any]] = None
global_steps: Optional[int] = None
param_version: Optional[int] = None
def prepare_single_generation_data(batch_dict, config) -> DataProto:
"""
Similar to the logic of ray_trainer._prepare_generate_batch, but for a single sample.
    Separates the data used for generation from the original data.
    Returns:
        DataProto: The full batch prepared for generation, repeated rollout.n times.
"""
full_batch = DataProto.from_single_dict(batch_dict)
batch_keys_to_pop = []
non_tensor_batch_keys_to_pop = []
existing_batch_keys = [k for k in batch_keys_to_pop if k in full_batch.batch.keys()]
existing_non_tensor_keys = [k for k in non_tensor_batch_keys_to_pop if k in full_batch.non_tensor_batch.keys()]
if existing_batch_keys or existing_non_tensor_keys:
full_batch.pop(
batch_keys=existing_batch_keys,
non_tensor_batch_keys=existing_non_tensor_keys,
)
    # Select an agent loop implementation that supports partial rollout
if config.actor_rollout_ref.rollout.multi_turn.enable:
full_batch.non_tensor_batch["agent_name"] = np.array(
["async_partial_tool_agent"] * len(full_batch), dtype=object
)
else:
full_batch.non_tensor_batch["agent_name"] = np.array(
["partial_single_turn_agent"] * len(full_batch), dtype=object
)
    # Repeat the batch rollout.n times so there is one entry per trajectory
full_batch = full_batch.repeat(repeat_times=config.actor_rollout_ref.rollout.n, interleave=True)
return full_batch
def assemble_batch_from_rollout_samples(
rollout_samples: list[RolloutSample], tokenizer, config, balance_batch=None
) -> DataProto:
"""
    Assemble gen_batch_output from RolloutSample objects, similar to the
    _post_generate_batch logic in ray_trainer.
    Args:
        rollout_samples: List of RolloutSample objects
        tokenizer: Tokenizer instance
        config: Configuration object containing trainer settings
        balance_batch: Optional callable that balances the batch across ranks (simplified version)
Returns:
DataProto: Assembled gen_batch_output
Raises:
ValueError: If rollout_samples is empty
"""
start_time = time.time()
if not rollout_samples:
raise ValueError("Empty rollout_samples provided for batch assembly")
print(f"[BatchUtils] Assembling batch from {len(rollout_samples)} RolloutSample objects")
rollout_samples_batch = []
processing_times = []
tool_calls = []
rollout_status = rollout_samples[0].rollout_status
# Add a prefix to all rollout_status keys
rollout_status = {f"fully_async/{key}": value for key, value in rollout_status.items()}
for rs in rollout_samples:
rollout_samples_batch.append(rs.full_batch)
final_batch = DataProto.concat(rollout_samples_batch)
# Calculate response_mask (if not present)
if "response_mask" not in final_batch.batch.keys():
final_batch.batch["response_mask"] = compute_response_mask(final_batch)
if balance_batch:
balance_batch(final_batch, metrics={})
# Calculate the global valid token number
if "attention_mask" in final_batch.batch:
final_batch.meta_info["global_token_num"] = torch.sum(final_batch.batch["attention_mask"], dim=-1).tolist()
processing_times = final_batch.non_tensor_batch["processing_times"]
tool_calls = final_batch.non_tensor_batch["tool_calls_times"]
# Collect statistics
processing_time_stats = {
"processing_time/avg": np.mean(processing_times),
"processing_time/max": np.max(processing_times),
"processing_time/min": np.min(processing_times),
"processing_time/tp50": np.percentile(processing_times, 50),
"processing_time/tp99": np.percentile(processing_times, 99),
"processing_time/tp95": np.percentile(processing_times, 95),
}
tool_calls_stats = {}
if len(tool_calls) > 0:
tool_calls_stats = {
"timing_s/agent_loop/tool_calls/max": np.max(tool_calls),
"timing_s/agent_loop/tool_calls/min": np.min(tool_calls),
"timing_s/agent_loop/tool_calls/mean": np.mean(tool_calls),
}
processing_time_stats = {f"fully_async/{key}": value for key, value in processing_time_stats.items()}
param_version_start = final_batch.non_tensor_batch["param_version_start"]
param_version_end = final_batch.non_tensor_batch["param_version_end"]
param_version_diff = [abs(a - b) for a, b in zip(param_version_end, param_version_start, strict=False)]
num_diff0 = param_version_diff.count(0)
partial_stats = {
"fully_async/partial/total_partial_num": len(param_version_diff) - num_diff0,
"fully_async/partial/partial_ratio": (len(param_version_diff) - num_diff0) / len(param_version_diff),
"fully_async/partial/max_partial_span": max(param_version_diff),
}
# add meta_info
param_versions = [rs.param_version for rs in rollout_samples]
    trajectory_param_versions = final_batch.non_tensor_batch["param_version_end"]
final_batch.meta_info.update(
{
"rollout_param_versions": param_versions,
"param_version_diversity": len(set(param_versions)) if param_versions else 0,
"trajectory_param_versions": trajectorys_param_versions,
**processing_time_stats,
**rollout_status,
**partial_stats,
**tool_calls_stats,
}
)
print(f"[BatchUtils] Batch assembly completed in {time.time() - start_time:.2f}s")
return final_batch
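# Hedged worked example of the statistics computed above (values are made up):
# given processing_times = [1.0, 2.0, 4.0], np.percentile(..., 50) -> 2.0 and
# np.mean(...) -> ~2.33; and given param_version_start = [3, 3, 4] with
# param_version_end = [3, 4, 6], param_version_diff = [0, 1, 2], so
# total_partial_num = 2, partial_ratio = 2/3 and max_partial_span = 2.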
class MetricsAggregator:
"""Metrics aggregator, used to combine metrics from multiple training steps"""
def __init__(self, total_gpus: int):
# Store all values for each metric
self.metric_values: dict[str, list[float]] = defaultdict(list)
# Store the number of samples at each step for weighted averaging
self.sample_counts: list[int] = []
# Store the timestamp of each step for time-related calculations
self.timestamps: list[float] = []
# Step Count
self.step_count = 0
# total num gpus used
self.total_gpus = total_gpus
# Metric aggregation rule configuration
self.aggregation_rules = self._init_aggregation_rules()
def _init_aggregation_rules(self) -> dict[str, dict[str, list[str]]]:
"""Initialize metrics aggregation rules"""
return {
            # Time-based metrics; more metrics can be added here
"time_sum": ["perf/time_per_step"],
"min": ["timing_s/agent_loop/tool_calls/min"],
"avg": ["timing_s/agent_loop/tool_calls/mean"],
"max": ["timing_s/agent_loop/tool_calls/max"],
"last": [
"fully_async/count/total_generated_samples",
"fully_async/count/stale_samples_processed",
"fully_async/count/stale_trajectory_processed",
"fully_async/count/current_param_version",
"fully_async/count/dropped_stale_samples",
"training/global_step", # TODO change name to: total_step
],
}
def add_step_metrics(self, metrics: dict[str, Any], sample_count: int, timestamp: float = None):
"""Adding a single-step metrics"""
if timestamp is None:
timestamp = time.time()
self.sample_counts.append(sample_count)
self.timestamps.append(timestamp)
self.step_count += 1
# Store all metrics values
for key, value in metrics.items():
if isinstance(value, int | float | np.number):
self.metric_values[key].append(float(value))
elif isinstance(value, torch.Tensor):
self.metric_values[key].append(float(value.item()))
def _get_aggregation_type(self, metric_name: str) -> str:
"""Determine the aggregation type based on the metric name"""
for agg_type, metric_list in self.aggregation_rules.items():
if metric_name in metric_list:
return agg_type
metric_lower = metric_name.lower()
if any(keyword in metric_lower for keyword in ["timing_s/"]):
return "time_sum"
if any(keyword in metric_lower for keyword in ["mean", "avg", "average"]):
return "avg"
if any(keyword in metric_lower for keyword in ["max", "maximum"]):
return "max"
if any(keyword in metric_lower for keyword in ["min", "minimum"]):
return "min"
if any(keyword in metric_lower for keyword in ["sum", "total"]):
return "sum"
if any(keyword in metric_lower for keyword in ["weighted_avg"]):
return "weighted_avg"
return "avg"
def _aggregate_single_metric(self, metric_name: str, values: list[float]) -> float:
"""Aggregating a single metric"""
if not values:
return 0.0
agg_type = self._get_aggregation_type(metric_name)
if agg_type == "last":
return values[-1]
elif agg_type == "weighted_avg":
# Weighted average
if len(values) != len(self.sample_counts):
# If the lengths do not match, use a simple average
return sum(values) / len(values)
total_samples = sum(self.sample_counts)
if total_samples == 0:
return sum(values) / len(values)
weighted_sum = sum(v * c for v, c in zip(values, self.sample_counts, strict=False))
return weighted_sum / total_samples
elif agg_type == "sum" or agg_type == "time_sum":
return sum(values)
elif agg_type == "avg":
return sum(values) / len(values)
elif agg_type == "max":
return max(values)
elif agg_type == "min":
return min(values)
else:
# Default average
return sum(values) / len(values)
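    # Hedged worked example of the weighted-average branch above (hypothetical values):
    # values = [1.0, 3.0] with sample_counts = [100, 300] gives
    # (1.0 * 100 + 3.0 * 300) / 400 = 2.5, whereas a plain average would give 2.0.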
def get_aggregated_metrics(self) -> dict[str, Any]:
"""aggregated metrics"""
t = time.time()
if self.step_count == 0:
return {}
aggregated = {}
# Aggregate all metrics
for metric_name, values in self.metric_values.items():
aggregated[metric_name] = self._aggregate_single_metric(metric_name, values)
# Aggregate special metrics
        aggregated = self._special_metrics_aggregate(aggregated)
        print(f"aggregated metrics done. cost {time.time() - t:.3f}s")
return aggregated
    def _special_metrics_aggregate(self, aggregated: dict[str, Any]) -> dict[str, Any]:
        """Compute derived metrics from the already-aggregated values"""
# global_seqlen/minmax_diff
if "global_seqlen/minmax_diff" in aggregated.keys():
aggregated["global_seqlen/minmax_diff"] = aggregated["global_seqlen/max"] - aggregated["global_seqlen/min"]
# perf/throughput
REQUIRED_PERF_KEYS = {"perf/throughput", "perf/total_num_tokens", "perf/time_per_step"}
if REQUIRED_PERF_KEYS.issubset(aggregated):
aggregated["perf/throughput"] = aggregated["perf/total_num_tokens"] / (
aggregated["perf/time_per_step"] * self.total_gpus
)
# trainer/idle_ratio
if "timing_s/gen" in aggregated.keys() and "timing_s/step" in aggregated.keys():
aggregated["trainer/idle_ratio"] = aggregated["timing_s/gen"] / aggregated["timing_s/step"]
return aggregated
def reset(self):
"""Reset Aggregator"""
self.metric_values.clear()
self.sample_counts.clear()
self.timestamps.clear()
self.step_count = 0
def get_current_stats(self) -> dict[str, Any]:
"""Get statistics about the current aggregation state (for debugging)"""
return {
"step_count": self.step_count,
"metric_count": len(self.metric_values),
"total_samples": sum(self.sample_counts),
"metric_names": list(self.metric_values.keys()),
}
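# Hedged usage sketch for MetricsAggregator (values below are illustrative):
#
#     aggregator = MetricsAggregator(total_gpus=16)
#     aggregator.add_step_metrics({"perf/time_per_step": 12.0, "actor/kl_loss": 0.01}, sample_count=128)
#     aggregator.add_step_metrics({"perf/time_per_step": 10.0, "actor/kl_loss": 0.02}, sample_count=128)
#     merged = aggregator.get_aggregated_metrics()
#     # "perf/time_per_step" follows the "time_sum" rule -> 22.0, while
#     # "actor/kl_loss" falls back to the default average -> 0.015.
#     aggregator.reset()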
|
verl__experimental__fully_async_policy__detach_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torch.distributed as dist
from packaging import version
from torch.distributed.tensor import DTensor
from torch.distributed.tensor._dtensor_spec import DTensorSpec
if version.parse(torch.__version__) < version.parse("2.6"):
raise RuntimeError("PyTorch 2.6 or higher is required to use fstp_utils.")
def fsdp2_sharded_save_to_cpu(
model: torch.nn.Module,
) -> tuple[dict[str, tuple[torch.Tensor, DTensorSpec]], DTensorSpec]:
"""
Sharded Save: Each process only saves the local DTensor shard from its own GPU to CPU memory.
Args:
model: FSDP2-wrapped model whose parameters are of DTensor type.
Returns:
cpu_sharded_state: Dictionary of CPU shards for the current process.
Key = parameter name, Value = (CPU shard tensor, original DTensorSpec)
global_spec: DTensorSpec of the first parameter (used to verify global rules during loading)
"""
cpu_sharded_state = {}
global_spec = None # Record global sharding rules (all parameters follow the same spec)
for param_name, param in model.named_parameters():
# Only process sharded parameters of DTensor type (core parameters of FSDP2)
if not isinstance(param, DTensor):
            # Save non-DTensor (unsharded) parameters directly as local data
cpu_tensor = param.detach().cpu()
cpu_sharded_state[param_name] = (cpu_tensor, None)
continue
# Record global sharding rules (take spec of the first DTensor to ensure consistency)
if global_spec is None:
global_spec = param._spec
assert hasattr(global_spec, "device_mesh"), "DTensorSpec must contain 'device_mesh' attribute"
assert hasattr(global_spec, "placements"), "DTensorSpec must contain 'placements' attribute"
# 1. Extract local shard data from the current GPU (_local_tensor)
        local_gpu_tensor = param._local_tensor  # Local shard held by this rank's DTensor
# 2. Move to CPU memory and detach from computation graph
local_cpu_tensor = local_gpu_tensor.detach().cpu()
# 3. Save CPU shard + original DTensorSpec (ensure sharding rules remain unchanged)
cpu_sharded_state[param_name] = (local_cpu_tensor, param._spec)
assert global_spec is not None, "No DTensor-type parameters found in the model. FSDP2 sharding may not be enabled."
return cpu_sharded_state, global_spec
def fsdp2_sharded_load_from_cpu(
model: torch.nn.Module,
cpu_sharded_state: dict[str, tuple[torch.Tensor, Optional[DTensorSpec]]],
target_spec: DTensorSpec,
) -> None:
"""
Sharded Load: Each process only loads the CPU shard it is responsible for to the GPU,
keeping sharding rules unchanged.
Args:
model: FSDP2 model to be restored (must have the same structure as when saved)
cpu_sharded_state: Shard data read from CPU memory by the current process
(from fsdp2_sharded_save_to_cpu)
target_spec: Global DTensorSpec from saving (used to verify sharding rule consistency)
"""
# Verify device_mesh consistency (core: ensure loaded shards map to original GPUs)
current_device_mesh = None
for param in model.parameters():
if isinstance(param, DTensor):
current_device_mesh = param._spec.device_mesh
break
assert current_device_mesh is not None, "DTensor parameters not initialized in the model to be loaded"
assert current_device_mesh == target_spec.device_mesh, (
f"device_mesh mismatch during loading! Original: {target_spec.device_mesh}, Current: {current_device_mesh}"
)
for param_name, param in model.named_parameters():
# Skip parameters not in the saved state (e.g., newly added parameters)
if param_name not in cpu_sharded_state:
continue
# Extract CPU shard data and original Spec
local_cpu_tensor, saved_spec = cpu_sharded_state[param_name]
# Handle different parameter types: DTensor sharded parameters vs. regular parameters
if isinstance(param, DTensor):
# 1. Verify sharding rule consistency (placements must match original Spec)
assert saved_spec is not None, f"DTensorSpec missing in saved state for parameter {param_name}"
assert saved_spec.placements == target_spec.placements, (
f"Sharding strategy mismatch for parameter {param_name} (conflicts with global rules)!"
)
# 2. Move CPU shard data to the current GPU (device of param._local_tensor)
target_device = param._local_tensor.device
local_gpu_tensor = local_cpu_tensor.to(target_device)
# 3. Restore to DTensor's local shard (directly copy to _local_tensor, keep spec unchanged)
param._local_tensor.copy_(local_gpu_tensor)
else:
# Regular parameters: load directly to original device
target_device = param.device
param.data.copy_(local_cpu_tensor.to(target_device))
# Process synchronization: ensure all processes complete loading before proceeding
dist.barrier()
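# Hedged round-trip sketch (assumes torch.distributed is initialized and `model`
# is already wrapped with FSDP2 so its parameters are DTensors):
#
#     cpu_state, spec = fsdp2_sharded_save_to_cpu(model)
#     ...  # e.g. run updates that should later be rolled back
#     fsdp2_sharded_load_from_cpu(model, cpu_state, spec)
#
# Each rank keeps only its own shard on CPU, so host memory usage scales with
# the local shard size rather than the full model size.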
|
verl__experimental__fully_async_policy__fsdp2_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import torch
import torch.distributed
from omegaconf import DictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl.experimental.fully_async_policy.base_detach_sync import BaseDetachNcclSync
from verl.experimental.fully_async_policy.fsdp2_utils import fsdp2_sharded_load_from_cpu, fsdp2_sharded_save_to_cpu
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
get_torch_device,
)
from verl.utils.fsdp_utils import (
fsdp_version,
load_fsdp_model_to_gpu,
offload_fsdp_model_to_cpu,
)
from verl.workers.fsdp_workers import AsyncActorRolloutRefWorker, CriticWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
__all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"]
class DetachNcclSync(BaseDetachNcclSync, AsyncActorRolloutRefWorker):
def __init__(self, config: DictConfig, role: str):
BaseDetachNcclSync.__init__(self, config, role)
AsyncActorRolloutRefWorker.__init__(self, config, role)
    def _get_actor_params(self):
        # Placeholder; overridden by DetachActorWorker. The base implementation returns None.
        pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self, sync_group_name="actor_rollout"):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
if self._is_actor and self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
params = self._get_actor_params() if self._is_actor else None
rollout_name = self.config.rollout.name
inference_model = None
if self._is_rollout:
if rollout_name == "vllm":
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
# For ServerAdapter, _engine might be None and needs async initialization
if inference_model is None:
# Initialize the server adapter engine
print("[sync_rollout_weights] Initialize server adapter engine")
async def init_engine():
if hasattr(self.rollout, "_init_server_adapter"):
await self.rollout._init_server_adapter()
else:
print("[sync_rollout_weights] No _init_server_adapter method found")
return self.rollout._engine
inference_model = self._run_async_safely(init_engine())
# For ServerAdapter, only TP rank 0 initializes the engine
# TP rank != 0 can safely have inference_model as None
from verl.workers.rollout.sglang_rollout.sglang_rollout import ServerAdapter
is_server_adapter = isinstance(self.rollout, ServerAdapter)
is_non_tp_rank = False
if (
is_server_adapter
and hasattr(self.rollout, "device_mesh")
and self.rollout.device_mesh is not None
):
try:
is_non_tp_rank = self.rollout.device_mesh["infer_tp"].get_local_rank() != 0
except Exception:
pass
if inference_model is None and not (is_server_adapter and is_non_tp_rank):
raise RuntimeError(
f"Failed to initialize rollout engine. "
f"rollout type: {type(self.rollout)}, "
f"has _init_server_adapter: {hasattr(self.rollout, '_init_server_adapter')}"
)
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
if rollout_name == "sglang" and self._is_rollout:
self._sync_sglang_weights(inference_model, params, sync_group_name)
else:
self._sync_vllm_weights(inference_model, params, sync_group_name)
if self._is_actor and self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
get_torch_device().empty_cache()
def cache_actor_weights_to_cpu(self):
self.cpu_named_params = {}
if self._is_actor:
params = self._get_actor_params()
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
for tensor_idx, (key, _, _) in enumerate(self._weights_info):
origin_data = params[key]
if hasattr(origin_data, "full_tensor"):
origin_data = origin_data.full_tensor()
if tensor_idx % world_size == local_rank:
self.cpu_named_params[key] = origin_data.to("cpu", non_blocking=True)
get_torch_device().synchronize()
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights_by_checkpoint(self, sync_group_name="actor_rollout"):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
# Load model to GPU
load_start_time = time.time()
if self._is_actor and self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
load_duration = time.time() - load_start_time
from ray.util.collective import collective
# Cache actor weights to CPU and measure the time taken
cache_start_time = time.time()
self.cache_actor_weights_to_cpu()
cache_end_time = time.time()
cache_duration = cache_end_time - cache_start_time
# Register the cached weights into the checkpoint engine
self.checkpoint_engine.register_checkpoint(self._weights_info, self.cpu_named_params)
register_end_time = time.time()
register_duration = register_end_time - cache_end_time
self.cpu_named_params = {}
collective.barrier(group_name=sync_group_name)
update_start_time = time.time()
inference_model = None
if self._is_rollout:
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
# Update the checkpoint with the inference model and broadcast weights
self.checkpoint_engine.update_checkpoint(
inference_model=inference_model,
group_name=sync_group_name,
overlap_broadcast_and_consume=self.config.checkpoint_engine.overlap_broadcast_and_consume,
)
update_end_time = time.time()
update_duration = update_end_time - update_start_time
offload_start_time = time.time()
if self._is_actor and self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
offload_duration = time.time() - offload_start_time
print(
f"sync_rollout_weights_by_checkpoint finish!, rank:{torch.distributed.get_rank()},"
f" is_actor:{self._is_actor}, is_rollout:{self._is_rollout},"
f" total cost:{update_end_time - cache_start_time} seconds, while cache cost {cache_duration} seconds, "
f" register cost {register_duration} seconds, update cost {update_duration} seconds"
)
if self._is_actor and self._is_offload_param:
print(
f"sync_rollout_weights_by_checkpoint load model to gpu cost {load_duration} seconds,"
f" offload model to cpu cost {offload_duration} seconds"
)
class DetachActorWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print("[DetachAsyncRolloutWorker] Initializing via DetachNcclSync...")
DetachNcclSync.__init__(self, config, role)
def _get_actor_params(self):
assert self._is_actor
params = self.actor_module_fsdp.state_dict()
from verl.utils.model import convert_weight_keys
params = convert_weight_keys(
params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
)
return params
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_actor_weights_info(self):
assert self._is_actor
if hasattr(self, "_weights_info"):
return self._weights_info
if fsdp_version(self.actor_module_fsdp) == 1:
from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType
FSDP.set_state_dict_type(
self.actor_module_fsdp,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig(),
)
params = self._get_actor_params()
ret = []
for key, tensor in params.items():
ret.append((key, tensor.size(), tensor.dtype))
self._weights_info = ret
return ret
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_model_to_cpu(self, n):
if not hasattr(self, "cpu_saved_models"):
self.cpu_saved_models = {}
self.cpu_saved_models[n] = fsdp2_sharded_save_to_cpu(self.actor_module_fsdp)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def restore_model_from_cpu(self, n):
if n in self.cpu_saved_models:
cpu_sharded_state, global_spec = self.cpu_saved_models[n]
fsdp2_sharded_load_from_cpu(self.actor_module_fsdp, cpu_sharded_state, global_spec)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def clear_cpu_model(self, n):
if n in self.cpu_saved_models:
del self.cpu_saved_models[n]
class DetachAsyncRolloutWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}")
DetachNcclSync.__init__(self, config, role)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def set_actor_weights_info(self, weights_info):
assert self._is_rollout
self._weights_info = weights_info
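# Hedged sketch of the weight-sync handshake these workers participate in
# (the real orchestration lives in ParameterSynchronizer; the worker-group
# handle names and call pattern below are assumptions for illustration):
#
#     weights_info = actor_wg.get_actor_weights_info()[0]    # from the trainer side
#     rollout_wg.set_actor_weights_info(weights_info)        # to the rollout side
#     actor_wg.sync_rollout_weights(sync_group_name="actor_rollout")
#     rollout_wg.sync_rollout_weights(sync_group_name="actor_rollout")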
|
verl__experimental__fully_async_policy__fsdp_workers.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import socket
import threading
from pprint import pprint
import hydra
import ray
from omegaconf import OmegaConf
from verl.experimental.fully_async_policy.fully_async_rollouter import FullyAsyncRollouter
from verl.experimental.fully_async_policy.fully_async_trainer import FullyAsyncTrainer
from verl.experimental.fully_async_policy.message_queue import MessageQueue, MessageQueueClient
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role, need_reference_policy
from verl.utils.fs import copy_to_local
def create_resource_pool_manager(config, roles: list) -> ResourcePoolManager:
"""
Create resource pool manager
Args:
config: Configuration object
roles: List of roles that need to create resource pools
Returns:
ResourcePoolManager: Resource pool manager
"""
resource_pool_spec = {}
mapping = {}
# Actor/Critic resource pool
if any(role in roles for role in [Role.Actor, Role.ActorRollout, Role.Critic, Role.RefPolicy, Role.RewardModel]):
assert config.trainer.n_gpus_per_node > 0, "config.trainer.n_gpus_per_node must be greater than 0"
assert config.trainer.nnodes > 0, "config.trainer.nnodes must be greater than 0"
trainer_pool = [config.trainer.n_gpus_per_node] * config.trainer.nnodes
resource_pool_spec["trainer_pool"] = trainer_pool
# Map training-related roles to the same resource pool
for role in [Role.Actor, Role.ActorRollout, Role.Critic, Role.RefPolicy, Role.RewardModel]:
if role in roles:
mapping[role] = "trainer_pool"
# Rollout resource pool
if Role.Rollout in roles:
assert config.rollout.n_gpus_per_node > 0, "config.rollout.n_gpus_per_node must be greater than 0"
assert config.rollout.nnodes > 0, "config.rollout.nnodes must be greater than 0"
rollout_pool = [config.rollout.n_gpus_per_node] * config.rollout.nnodes
resource_pool_spec["rollout_pool"] = rollout_pool
mapping[Role.Rollout] = "rollout_pool"
return ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
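# Hedged illustration: for a hypothetical config with trainer.nnodes = 2,
# trainer.n_gpus_per_node = 8, rollout.nnodes = 1, rollout.n_gpus_per_node = 8
# and roles = [Role.Actor, Role.Rollout], the helper above produces
#     resource_pool_spec = {"trainer_pool": [8, 8], "rollout_pool": [8]}
# with Role.Actor mapped to "trainer_pool" and Role.Rollout to "rollout_pool".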
def create_role_worker_mapping(config):
"""
Create mapping from roles to worker classes
Args:
config: Configuration object
Returns:
dict: Mapping from roles to worker classes
"""
# Select worker class based on strategy
use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
if use_legacy_worker_impl == "disable":
from verl.experimental.separation.engine_workers import (
DetachActorWorker,
DetachAsyncRolloutWorker,
TrainingWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
CriticWorker = TrainingWorker
else:
if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.experimental.fully_async_policy.fsdp_workers import (
CriticWorker,
DetachActorWorker,
DetachAsyncRolloutWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
elif config.actor_rollout_ref.actor.strategy == "megatron":
assert config.critic.strategy == "megatron"
from verl.experimental.fully_async_policy.megatron_worker import (
CriticWorker,
DetachActorWorker,
DetachAsyncRolloutWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
else:
raise NotImplementedError(f"Unsupported strategy: {config.actor_rollout_ref.actor.strategy}")
train_role = Role.ActorRollout if config.async_training.use_trainer_do_validate else Role.Actor
role_worker_mapping = {
train_role: ray.remote(DetachActorWorker),
Role.Rollout: ray.remote(DetachAsyncRolloutWorker),
Role.Critic: ray.remote(CriticWorker),
}
# Add reference policy (if KL loss or reward is required)
if need_reference_policy(config):
role_worker_mapping[Role.RefPolicy] = ray.remote(DetachActorWorker)
return role_worker_mapping, ray_worker_group_cls
@ray.remote(num_cpus=1)
class FullyAsyncTaskRunner:
"""
Ray remote class for executing distributed PPO training tasks.
"""
def __init__(self):
self.running = False
self.components = {}
self.shutdown_event = threading.Event()
def run(self, config):
print("[ASYNC MAIN] Starting fully async PPO training...")
self._initialize_components(config)
self._run_training_loop()
def _initialize_components(self, config) -> None:
print(f"[ASYNC MAIN] TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}")
pprint(OmegaConf.to_container(config, resolve=True))
OmegaConf.resolve(config)
print("[ASYNC MAIN] Initializing model and tokenizer...")
local_path = copy_to_local(
config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False)
)
from verl.utils import hf_processor, hf_tokenizer
trust_remote_code = config.data.get("trust_remote_code", False)
tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
# Used for multimodal LLM, could be None
processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)
self.components["tokenizer"] = tokenizer
self.components["processor"] = processor
self.components["config"] = config
print("[ASYNC MAIN] Creating worker mapping and resource pools...")
role_worker_mapping, ray_worker_group_cls = create_role_worker_mapping(config)
self.components["role_worker_mapping"] = role_worker_mapping
self.components["ray_worker_group_cls"] = ray_worker_group_cls
from concurrent.futures import ThreadPoolExecutor
print("[ASYNC MAIN] Creating FullyAsyncRollouter and FullyAsyncTrainer in parallel...")
with ThreadPoolExecutor(max_workers=2) as executor:
rollouter_future = executor.submit(self._create_rollouter, config)
rollouter_future.result()
# TODO: keep _create_rollouter and _create_trainer parallel
trainer_future = executor.submit(self._create_trainer, config)
            # Wait for the trainer to finish (the rollouter future was already resolved above)
trainer_future.result()
# sync total_train_steps between rollouter and trainer
total_train_steps = ray.get(self.components["rollouter"].get_total_train_steps.remote())
print(f"total_train_steps {total_train_steps}")
ray.get(self.components["trainer"].set_total_train_steps.remote(total_train_steps))
# max_queue_size
max_queue_size = ray.get(self.components["rollouter"].get_max_queue_size.remote())
print(f"[ASYNC MAIN] Creating MessageQueue... max_queue_size {max_queue_size}")
message_queue = MessageQueue.remote(config, max_queue_size)
message_queue_client = MessageQueueClient(message_queue)
self.components["message_queue"] = message_queue
self.components["message_queue_client"] = message_queue_client
ray.get(self.components["rollouter"].set_message_queue_client.remote(self.components["message_queue_client"]))
ray.get(self.components["trainer"].set_message_queue_client.remote(self.components["message_queue_client"]))
print("[ASYNC MAIN] Setting up parameter synchronization...")
from verl.experimental.fully_async_policy.param_sync import ParameterSynchronizer
param_synchronizer = ParameterSynchronizer.remote(
config=config,
trainer=self.components["trainer"],
rollouter=self.components["rollouter"],
mq=self.components["message_queue_client"],
)
ray.get(self.components["trainer"].set_parameter_synchronizer.remote(param_synchronizer))
# load checkpoint and sync parameter before doing anything
val_before_train = config.trainer.get("val_before_train", True)
        # param_version is resumed from the checkpoint, or defaults to 0
param_version = ray.get(self.components["trainer"].load_checkpoint.remote())
ray.get(self.components["rollouter"].load_checkpoint.remote())
ray.get(
param_synchronizer.sync_weights.remote(
version=param_version,
validate=val_before_train,
use_trainer_do_validate=config.async_training.use_trainer_do_validate,
)
)
ray.get(param_synchronizer.wait_last_valid.remote())
self.components["param_synchronizer"] = param_synchronizer
print("[ASYNC MAIN] All components initialized successfully")
def _create_rollouter(self, config) -> None:
rollouter = FullyAsyncRollouter.remote(
config=config,
tokenizer=self.components["tokenizer"],
role_worker_mapping={Role.Rollout: self.components["role_worker_mapping"][Role.Rollout]},
resource_pool_manager=create_resource_pool_manager(config, roles=[Role.Rollout]),
ray_worker_group_cls=self.components["ray_worker_group_cls"],
processor=self.components["processor"],
device_name=config.trainer.device,
)
ray.get(rollouter.init_workers.remote())
ray.get(rollouter.set_max_required_samples.remote())
self.components["rollouter"] = rollouter
print("[ASYNC MAIN] Rollouter created and initialized successfully")
def _create_trainer(self, config) -> None:
trainer_role_mapping = {
role: worker_cls
for role, worker_cls in self.components["role_worker_mapping"].items()
if role != Role.Rollout
}
trainer = FullyAsyncTrainer.remote(
config=config,
tokenizer=self.components["tokenizer"],
role_worker_mapping=trainer_role_mapping,
resource_pool_manager=create_resource_pool_manager(config, roles=list(trainer_role_mapping.keys())),
ray_worker_group_cls=self.components["ray_worker_group_cls"],
processor=self.components["processor"],
device_name=config.trainer.device,
)
ray.get(trainer.init_workers.remote())
self.components["trainer"] = trainer
print("[ASYNC MAIN] FullyAsyncTrainer created and initialized successfully")
def _run_training_loop(self):
self.running = True
print("[ASYNC MAIN] Starting Rollouter and Trainer...")
rollouter_future = self.components["rollouter"].fit.remote()
trainer_future = self.components["trainer"].fit.remote()
futures = [rollouter_future, trainer_future]
try:
while futures:
# Use ray.wait to monitor all futures and return when any one is completed.
done_futures, remaining_futures = ray.wait(futures, num_returns=1, timeout=None)
for future in done_futures:
try:
ray.get(future)
print("[ASYNC MAIN] One component completed successfully")
except Exception as e:
print(f"[ASYNC MAIN] Component failed with error: {e}")
for remaining_future in remaining_futures:
ray.cancel(remaining_future)
raise e
futures = remaining_futures
except Exception as e:
print(f"[ASYNC MAIN] Training failed: {e}")
for future in futures:
ray.cancel(future)
raise
finally:
asyncio.run(self.components["message_queue_client"].clear_queue())
print("[ASYNC MAIN] Training completed or interrupted")
@hydra.main(config_path="config", config_name="fully_async_ppo_trainer", version_base=None)
def main(config):
from verl.trainer.main_ppo import run_ppo
# Ensure async training config exists
if not hasattr(config, "async_training"):
raise RuntimeError("must set async_training config")
from time import time
start_time = time()
run_ppo(config, task_runner_class=FullyAsyncTaskRunner)
print(f"total time: {time() - start_time:.2f} seconds")
if __name__ == "__main__":
main()
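# Hedged launch sketch (the Hydra config name comes from the decorator above;
# the overrides listed are keys referenced in this package and are illustrative):
#
#     python -m verl.experimental.fully_async_policy.fully_async_main \
#         async_training.trigger_parameter_sync_step=2 \
#         async_training.staleness_threshold=1 \
#         trainer.nnodes=1 trainer.n_gpus_per_node=8 \
#         rollout.nnodes=1 rollout.n_gpus_per_node=8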
|
verl__experimental__fully_async_policy__fully_async_main.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import multiprocessing
import os
import time
from concurrent.futures import ThreadPoolExecutor
from pprint import pformat
import numpy as np
import ray
import torch
from ray import ObjectRef
from verl.experimental.fully_async_policy.detach_utils import (
RolloutSample,
ValidateMetrics,
prepare_single_generation_data,
)
from verl.experimental.fully_async_policy.message_queue import MessageQueueClient
from verl.experimental.separation.ray_trainer import SeparateRayPPOTrainer
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role, WorkerType
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path
from verl.utils.profiler import marked_timer
from verl.utils.tracking import ValidationGenerationsLogger
@ray.remote(num_cpus=10, max_concurrency=100)
class FullyAsyncRollouter(SeparateRayPPOTrainer):
"""
Asynchronous sample generator, responsible for continuously generating training samples
and putting them into MessageQueue
Based on the mature implementation improvements of OneStepOffRayTrainer
"""
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup,
processor=None,
device_name=None,
):
# Store the tokenizer for text processing
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
assert not self.hybrid_engine
assert self.config.data.train_batch_size == 0, "train_batch_size must be zero"
assert self.config.data.gen_batch_size == 1, "gen_batch_size must be one"
        assert self.config.async_training.staleness_threshold >= 0, "staleness_threshold must be >= 0"
        assert self.config.async_training.trigger_parameter_sync_step >= 1, (
            "trigger_parameter_sync_step must be >= 1"
        )
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = False
self.use_rm = False
self.use_critic = False
self.ray_worker_group_cls = ray_worker_group_cls
self.device_name = device_name if device_name else self.config.trainer.device
self.validation_generations_logger = ValidationGenerationsLogger(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
)
self.ref_in_actor = False
self.kl_ctrl_in_reward = False
self.use_prefix_grouper = self.config.actor_rollout_ref.actor.get("use_prefix_grouper", False)
self.use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
# ==================== fully async config ====================
print("[FullyAsyncRollouter] Creating datasets...")
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
from verl.utils.dataset.rl_dataset import collate_fn
train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor)
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor)
train_sampler = create_rl_sampler(config.data, train_dataset)
self._validate_config()
if self.config.async_training.use_trainer_do_validate:
rollout_gpus = config.rollout.nnodes * config.rollout.n_gpus_per_node
train_gpus = config.trainer.nnodes * config.trainer.n_gpus_per_node
total_gpus = rollout_gpus + train_gpus
print(f"[FullyAsyncRollouter] split before val_dataset total len: {len(val_dataset)}")
split_dataset = val_dataset.split(total_gpus)
rollout_val_dataset0 = split_dataset[:rollout_gpus]
from torch.utils.data import ConcatDataset
val_dataset = ConcatDataset(rollout_val_dataset0)
print(f"[FullyAsyncRollouter] split after val_dataset total len: {len(val_dataset)}")
print(f"[FullyAsyncRollouter] Rollouter _create_dataloader...\n{train_dataset}\n{val_dataset}")
self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)
self.total_rollout_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.rollout.total_rollout_steps is not None:
self.total_rollout_steps = min(self.config.rollout.total_rollout_steps, self.total_rollout_steps)
print(f"[FullyAsyncRollouter] Total rollout steps: {self.total_rollout_steps}")
self.total_train_steps = None
# Rollouter parameter configuration
self.message_queue_client = None
        # Worker groups: rollout_wg is the same as actor_rollout_wg
self.rollout_wg = None
self.actor_rollout_wg = None
self.async_rollout_manager = None
# Config
self.staleness_threshold: float = config.async_training.get("staleness_threshold", 1)
        # required_samples uses ppo_mini_batch_size * require_batches as the minimum number of samples.
self.require_batches = config.async_training.require_batches
self.required_samples = config.actor_rollout_ref.actor.ppo_mini_batch_size * self.require_batches
self.max_required_samples = None
self.max_concurrent_samples = None
# queue size
self.max_queue_size = None
# Statistics
self.current_param_version = 0
self.total_generated_samples = 0
self.staleness_samples = 0
self.dropped_stale_samples = 0
self.processed_sample_count = 0
# we start from step 1
self.global_steps = 1
self.idle_start_time = None
self.version_start_time = None
# Concurrency control
# Modified by self.pause() or self._should_pause_generation()
self.paused = False
self.running = True
self.monitor_loop_trigger = True
# Add dataloader lock
self.dataloader_lock = asyncio.Lock()
# Initialize async queues
self.pending_queue = asyncio.Queue(maxsize=128)
self.active_tasks = set()
self.cancel_queue = asyncio.Queue()
cpu_cores = multiprocessing.cpu_count()
        # CPU-bound work: use cpu_cores; IO-bound work: use cpu_cores * 2
self.validate_executor = ThreadPoolExecutor(max_workers=cpu_cores)
self.parallel_validate_and_rollout = config.async_training.get("parallel_validate_and_rollout", False)
self.validate_task = None
def _init_async_objects(self):
# Initialize asyncio synchronization primitives.
# We let asyncio.Condition create the Lock internally to ensure they share the same Event Loop.
# This avoids 'ValueError: loop argument must agree with lock' which can occur in Ray environments
# where the lock's captured loop (get_running_loop) differs from Condition's default loop check.
# Explicitly passing the loop is deprecated/removed in Python 3.10+, so this reverse-initialization
# is the most robust workaround.
self.condition = asyncio.Condition()
self.lock = self.condition._lock
async def set_message_queue_client(self, message_queue_client: MessageQueueClient):
"""Set message queue client"""
async with self.lock:
self.message_queue_client = message_queue_client
async def set_max_required_samples(self):
async with self.lock:
self.max_required_samples = int(
self.required_samples
* (self.staleness_threshold + 1)
* self.config.async_training.trigger_parameter_sync_step
)
self.total_train_steps = int(
self.total_rollout_steps
/ (self.required_samples * self.config.async_training.trigger_parameter_sync_step)
)
self.max_concurrent_samples = len(self.async_rollout_manager.server_handles) * 16
self.max_concurrent_samples = min(self.max_concurrent_samples, self.max_required_samples)
self.max_queue_size = self.max_required_samples
print(
f"[FullyAsyncRollouter] required_samples : {self.required_samples} "
f"max_required_samples: {self.max_required_samples} "
f"max_queue_size: {self.max_queue_size} "
f"total_train_steps: {self.total_train_steps} "
f"total_rollout_steps: {self.total_rollout_steps} "
f"max_concurrent_samples: {self.max_concurrent_samples} "
)
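    # Hedged worked example of the sizing above (hypothetical values):
    # with ppo_mini_batch_size = 32 and require_batches = 4, required_samples = 128;
    # with staleness_threshold = 1 and trigger_parameter_sync_step = 2,
    # max_required_samples = 128 * (1 + 1) * 2 = 512 (= max_queue_size);
    # with total_rollout_steps = 12800, total_train_steps = 12800 / (128 * 2) = 50;
    # and with 4 rollout server handles, max_concurrent_samples = min(4 * 16, 512) = 64.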
def get_rollout_wg(self):
"""Get rollout worker group"""
return self.rollout_wg
def get_max_queue_size(self):
return self.max_queue_size
def get_total_train_steps(self):
return self.total_train_steps
async def update_param_version(
self, version: int, validate: bool = False, global_steps: int = 0, use_trainer_do_validate: bool = False
):
"""Update current parameter version"""
async with self.lock:
old_version = self.current_param_version
self.current_param_version = version
# every time param change, reset staleness_samples
self.staleness_samples = (
len(self.active_tasks) + self.cancel_queue.qsize() + await self.message_queue_client.get_queue_size()
)
timing_raw = {}
idle_ratio = None
if self.idle_start_time is not None and self.version_start_time is not None:
rollout_active_time = self.idle_start_time - self.version_start_time
rollout_version_time = time.time() - self.version_start_time
idle_ratio = 1 - rollout_active_time / rollout_version_time
timing_raw["rollouter/active_time"] = rollout_active_time
timing_raw["rollouter/version_time"] = rollout_version_time
timing_raw["rollouter/idle_ratio"] = idle_ratio
self.idle_start_time = None
print(
f"[FullyAsyncRollouter][Public][update_param_version] "
f"Parameter version updated from {old_version} to {version} "
f",reset staleness_samples to: {self.staleness_samples}"
f",idle_ratio: {idle_ratio}"
)
need_validate = (
(
self.config.rollout.test_freq > 0
and self.current_param_version % self.config.rollout.test_freq == 0
and self.current_param_version > 0
) # don't test here in the initial parameter sync
or validate
)
print(
f"[FullyAsyncRollouter] need_validate: {need_validate}, "
f"parallel_validate_and_rollout: {self.parallel_validate_and_rollout}"
)
if not need_validate:
data = ValidateMetrics(
timing_raw=timing_raw, metrics=None, global_steps=global_steps, param_version=version
)
elif need_validate and not self.parallel_validate_and_rollout:
data = self._validate_wrapper(timing_raw, version, global_steps, use_trainer_do_validate)
if not need_validate or not self.parallel_validate_and_rollout:
await self.message_queue_client.put_validate(ray.cloudpickle.dumps(data))
self.version_start_time = time.time()
if need_validate and self.parallel_validate_and_rollout:
if self.validate_task and not self.validate_task.done():
print("[FullyAsyncRollouter] validate_task is running, wait last validate_task to finish")
self.validate_task.get()
self.validate_task = asyncio.create_task(
self.do_validate_async(timing_raw, version, global_steps, use_trainer_do_validate)
)
def _validate_wrapper(
self, timing_raw: dict, version: int, global_steps: int = 0, use_trainer_do_validate: bool = False
):
val_metrics = None
with marked_timer("rollouter/validate_time", timing_raw, color="green"):
val_metrics: dict = self._validate(use_trainer_do_validate)
data = ValidateMetrics(
timing_raw=timing_raw, metrics=val_metrics, global_steps=global_steps, param_version=version
)
return data
async def do_validate_async(
self,
timing_raw: dict,
version: int,
global_steps: int = 0,
use_trainer_do_validate: bool = False,
):
loop = asyncio.get_running_loop()
data = await loop.run_in_executor(
self.validate_executor,
functools.partial(
self._validate_wrapper,
timing_raw=timing_raw,
version=version,
global_steps=global_steps,
use_trainer_do_validate=use_trainer_do_validate,
),
)
await self.message_queue_client.put_validate(ray.cloudpickle.dumps(data))
async def save_checkpoint(self, local_global_step_folder: str):
# WARNING!: Due to the asynchronous nature, there are some in-flight samples
# (pending/cancel/result queue and message queue).
# Therefore, directly saving the state of the dataloader will result in losing these
# samples when resuming training.
# TODO: Implement dataloader recovery without losing in-flight samples.
from verl.utils.fs import local_mkdir_safe
# save dataloader
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
async with self.dataloader_lock:
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
print(f"[FullyAsyncRollouter] Saved dataloader checkpoint to {dataloader_local_path}")
def load_checkpoint(self):
"""Load checkpoint including dataloader state based on resume mode"""
if self.config.trainer.resume_mode == "disable":
print("[FullyAsyncRollouter] Resume mode is disabled, starting from scratch")
return 0
# Determine checkpoint folder path
if self.config.trainer.default_hdfs_dir is not None:
raise NotImplementedError("[FullyAsyncRollouter] Load from hdfs is not implemented yet")
else:
checkpoint_folder = self.config.trainer.default_local_dir
if not os.path.isabs(checkpoint_folder):
working_dir = os.getcwd()
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
global_step_folder = find_latest_ckpt_path(checkpoint_folder)
# Find and validate global_step_folder based on resume mode
if self.config.trainer.resume_mode == "auto":
if global_step_folder is None:
print("[FullyAsyncRollouter] Training from scratch (no checkpoint found)")
return 0
elif self.config.trainer.resume_mode == "resume_path":
assert isinstance(self.config.trainer.resume_from_path, str), (
"[FullyAsyncRollouter] resume_from_path must be str type"
)
assert "global_step_" in self.config.trainer.resume_from_path, (
"[FullyAsyncRollouter] resume_from_path must specify the global_steps"
)
global_step_folder = self.config.trainer.resume_from_path
if not os.path.isabs(global_step_folder):
working_dir = os.getcwd()
global_step_folder = os.path.join(working_dir, global_step_folder)
else:
raise ValueError(f"[FullyAsyncRollouter] Unknown resume_mode: {self.config.trainer.resume_mode}")
print(f"[FullyAsyncRollouter] Loading checkpoint from: {global_step_folder}")
# Extract and set global step
trainer_global_steps = int(global_step_folder.split("global_step_")[-1])
self.global_steps = (
trainer_global_steps * self.required_samples * self.config.async_training.trigger_parameter_sync_step + 1
)
print(f"[FullyAsyncRollouter] Setting global_steps to {self.global_steps}")
# Load dataloader state
dataloader_local_path = os.path.join(global_step_folder, "data.pt")
if os.path.exists(dataloader_local_path):
dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
print(f"[FullyAsyncRollouter] Loaded dataloader state from {dataloader_local_path}")
else:
print(
f"[FullyAsyncRollouter] Warning: No dataloader state found at {dataloader_local_path}, "
f"will start from scratch"
)
def _validate_config(self):
# Validate asynchronous training configuration
if not hasattr(self.config, "async_training"):
raise ValueError("[FullyAsyncRollouter] Missing async_training configuration")
        assert self.config.actor_rollout_ref.rollout.calculate_log_probs, "rollout must calculate log_probs"
async def init_workers(self):
"""Initialize distributed training workers using Ray backend.
Creates:
1. Ray resource pools from configuration
2. Worker groups for each role (actor, critic, etc.)
"""
self._init_async_objects()
self._init_resource_pools()
self._create_worker_classes()
self._init_worker_groups()
self._init_models()
self._init_reward_loop()
await self._init_async_rollout_manager()
def _create_actor_rollout_classes(self):
# only create rollout
for role in [Role.Rollout]:
resource_pool = self.resource_pool_manager.get_resource_pool(role)
role_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[role],
config=self.config.actor_rollout_ref,
role=str(role),
)
self.resource_pool_to_cls[resource_pool][str(role)] = role_cls
def _init_models(self):
self.rollout_wg = self.all_wg[str(Role.Rollout)]
self.rollout_wg.init_model()
self.actor_rollout_wg = self.rollout_wg
def _create_continuous_iterator(self):
"""
Create a continuous data iterator across epoch
"""
for epoch in range(self.config.rollout.total_epochs):
iterator = iter(self.train_dataloader)
for batch_dict in iterator:
yield epoch, batch_dict
async def _init_async_rollout_manager(self):
# infrastructure overview: https://verl.readthedocs.io/en/latest/advance/reward_loop.html#architecture-design
# agent_reward_loop: streaming reward computation with actor rollout
# two conditions satisfied: (1) no reward model, or (2) reward model with extra resource pool
enable_agent_reward_loop = not self.use_rm or self.config.reward.reward_model.enable_resource_pool
# if enable_agent_reward_loop, we directly pass reward_loop_workers to agent loop manager
# to stream reward computation with actor rollout
reward_loop_worker_handles = self.reward_loop_manager.reward_loop_workers if enable_agent_reward_loop else None
# create async rollout manager and request scheduler
assert self.config.actor_rollout_ref.rollout.mode == "async"
from verl.experimental.fully_async_policy.agent_loop import FullyAsyncAgentLoopManager
self.async_rollout_mode = True
self.async_rollout_manager = await FullyAsyncAgentLoopManager.create(
config=self.config, worker_group=self.rollout_wg, reward_loop_worker_handles=reward_loop_worker_handles
)
# Add samples to the pending_queue
async def _feed_samples(self):
continuous_iterator = self._create_continuous_iterator()
for epoch, batch_dict in continuous_iterator:
# Similar to _prepare_generate_batch: Separate data
full_batch = prepare_single_generation_data(batch_dict, self.config)
sample_id = f"sample_{epoch}_{self.global_steps}"
rollout_sample = RolloutSample(
full_batch=full_batch,
agent_loop_output_list=[None] * self.config.actor_rollout_ref.rollout.n,
sample_id=sample_id,
epoch=epoch,
param_version=0,
param_version_start=[],
param_version_end=[],
processing_times=[],
tool_calls=[],
rollout_status={},
)
await self.pending_queue.put(rollout_sample)
            # Check whether we have reached the last rollout step
if self.global_steps >= self.total_rollout_steps:
print(
f"[FullyAsyncRollouter][Feed] "
f"Maximum count has been reached, stop adding new samples: "
f"{self.global_steps} >= {self.total_rollout_steps}"
)
break
self.global_steps += 1
# End signal
await self.pending_queue.put("DONE")
print(f"[FullyAsyncRollouter][Feed] Sample addition is complete, {self.global_steps} samples have been added")
async def _processor_worker(self):
"""
        Streaming worker coroutine: each sample is submitted for processing without waiting for a full batch.
"""
while True:
if self.paused or await self._should_pause_generation():
print(
"[FullyAsyncRollouter][Processor] Received pause signal, waiting for remaining tasks to return..."
)
async with self.lock:
self.paused = True
while self.active_tasks:
async with self.lock:
# After acquiring the lock, the number of active_tasks may change, need to be verified again
if self.active_tasks:
done_tasks, self.active_tasks = await asyncio.wait(
self.active_tasks, return_when=asyncio.FIRST_COMPLETED
)
for task in done_tasks:
await task
async with self.lock:
while self.paused:
self.idle_start_time = time.time()
await self.condition.wait()
continue
simple_from_cancel_queue = False
if not self.cancel_queue.empty():
rollout_sample = await self.cancel_queue.get()
simple_from_cancel_queue = True
else:
rollout_sample = await self.pending_queue.get()
self.staleness_samples += 1
if rollout_sample == "DONE":
print(
"[FullyAsyncRollouter][Processor] Received end signal, waiting for remaining tasks to complete..."
)
while self.active_tasks:
async with self.lock:
if self.active_tasks:
done_tasks, self.active_tasks = await asyncio.wait(
self.active_tasks, return_when=asyncio.FIRST_COMPLETED
)
for task in done_tasks:
await task
break
# Check whether the number of concurrent tasks exceeds the limit
while len(self.active_tasks) >= self.max_concurrent_samples:
async with self.lock:
if self.active_tasks:
done_tasks, self.active_tasks = await asyncio.wait(
self.active_tasks, return_when=asyncio.FIRST_COMPLETED
)
for task in done_tasks:
await task
# Submit single sample processing
async with self.lock:
# After the pause is over, the lock is acquired and it is necessary
# to determine whether it is the pause phase, otherwise continue to wait
while self.paused:
await self.condition.wait()
task = asyncio.create_task(
self._process_single_sample_streaming(rollout_sample),
name=rollout_sample.sample_id,
)
self.active_tasks.add(task)
if simple_from_cancel_queue:
self.cancel_queue.task_done()
else:
self.pending_queue.task_done()
async def _process_single_sample_streaming(self, rollout_sample: RolloutSample):
"""Process a single sample streamingly"""
# Calling asynchronous generation methods
rollout_sample.full_batch.non_tensor_batch["param_version"] = [self.current_param_version] * len(
rollout_sample.full_batch
)
ret, is_cancel = await self.async_rollout_manager.generate_single_sample_async(
rollout_sample.full_batch, rollout_sample.agent_loop_output_list
)
if not is_cancel:
rollout_sample.full_batch = ret
rollout_sample.full_batch.non_tensor_batch["uid"] = np.array(
[f"uid_{rollout_sample.sample_id}"] * len(rollout_sample.full_batch), dtype=object
)
rollout_sample.param_version = self.current_param_version
rollout_sample.rollout_status = await self.get_statistics()
rollout_sample.agent_loop_output_list = []
success = await self.message_queue_client.put_sample(
sample=ray.cloudpickle.dumps(rollout_sample),
param_version=rollout_sample.param_version,
)
if success:
self.total_generated_samples += 1
else:
self.dropped_stale_samples += 1
else:
rollout_sample.agent_loop_output_list = ret
await self.cancel_queue.put(rollout_sample)
self.processed_sample_count += 1
async def _streaming_generation_main(self):
"""The main entry method for stream processing"""
if self.async_rollout_manager is None:
await self._init_async_rollout_manager()
# Start the streaming loop
print(f"[FullyAsyncRollouter] Start streaming mode, maximum concurrent samples: {self.max_concurrent_samples}")
# Start sample feed coroutine, streaming process coroutine
self.feed_task = asyncio.create_task(self._feed_samples())
self.processor_task = asyncio.create_task(self._processor_worker())
try:
# Wait for sample feed to complete
# Use asyncio.wait to monitor all tasks. If processor exits early,
# detect it instead of blocking on feed_task (it might be stuck on a full queue).
done, pending = await asyncio.wait(
[self.feed_task, self.processor_task], return_when=asyncio.FIRST_COMPLETED
)
for task in done:
if task.exception():
raise task.exception()
if self.feed_task not in done:
raise RuntimeError("Processor task exited prematurely")
print("[FullyAsyncRollouter] Sample feed completed")
# Wait for streaming to complete
await self.processor_task
print("[FullyAsyncRollouter] Streaming process completed")
except Exception as e:
print(f"[FullyAsyncRollouter] Streaming process exception:{e}")
finally:
if self.processor_task:
self.processor_task.cancel()
await asyncio.gather(self.processor_task, return_exceptions=True)
# Send a finish signal
await self.message_queue_client.put_sample(
sample=None,
param_version=self.current_param_version,
)
async with self.lock:
self.running = False
async def fit(self):
"""
Start the async rollouter - entry point that sets up and runs async tasks
Main async fit method that coordinates all coroutines
"""
print("[FullyAsyncRollouter] Starting FullyAsyncRollouter...")
if self.message_queue_client is None:
raise ValueError("MessageQueue client not set. Call set_message_queue_client() first.")
# Set the running status flag
async with self.lock:
self.paused = False
self.running = True
# Create the main asynchronous task
generation_task = asyncio.create_task(self._streaming_generation_main())
monitor_task = asyncio.create_task(self._async_monitor_loop())
try:
# Run build and monitoring tasks concurrently
await asyncio.gather(generation_task, monitor_task, return_exceptions=True)
except Exception as e:
print(f"[FullyAsyncRollouter] Asynchronous task execution error: {e}")
finally:
if not generation_task.done():
generation_task.cancel()
if not monitor_task.done():
monitor_task.cancel()
# Wait for the task to complete
await asyncio.gather(generation_task, monitor_task, return_exceptions=True)
print("[FullyAsyncRollouter] Rollouter fit completed")
async def _async_monitor_loop(self):
"""
Async coroutine for monitoring:
Function 1: Log information output
Function 2: Trigger rollout recovery
"""
last_stats_time = time.time()
stats_interval = 60.0
check_interval = 10.0
while True:
async with self.lock:
if not self.running:
break
await asyncio.sleep(check_interval)
# Print statistics periodically
current_time = time.time()
if current_time - last_stats_time >= stats_interval:
stats = await self.get_statistics()
print(f"[FullyAsyncRollouter][MonitorLoop][Statistics] {pformat(stats)}")
last_stats_time = current_time
# Trigger rollout recovery
if self.monitor_loop_trigger:
if not await self._should_pause_generation():
async with self.lock:
self.paused = False
self.condition.notify_all()
async def _should_pause_generation(self) -> bool:
"""Determine whether the build should be paused"""
queue_stats = self.message_queue_client.get_statistics_sync()
queue_size = queue_stats["queue_size"]
if queue_size >= self.max_queue_size:
if not self.paused:
print(
f"[FullyAsyncRollouter][ShouldPause] "
f"due to full queue: size={queue_size}, max={self.max_queue_size}"
)
return True
if self.staleness_samples >= self.max_required_samples:
if not self.paused:
print(
"[FullyAsyncRollouter][ShouldPause] "
f"due to "
f"staleness_samples {self.staleness_samples} >= max_required_samples {self.max_required_samples} "
)
return True
return False
async def pause(self):
"""pause rollout"""
print("[FullyAsyncRollouter][Public][Pause] partial rollout:", self.config.async_training.partial_rollout)
async with self.lock:
self.paused = True
# Cancel all rollout tasks
if self.config.async_training.partial_rollout:
await self.async_rollout_manager.cancel()
print("[FullyAsyncRollouter][Public][Pause] Unfinished rollout tasks canceled")
if self.active_tasks:
await asyncio.gather(*self.active_tasks, return_exceptions=True)
self.active_tasks.clear()
print("[FullyAsyncRollouter][Public][Pause] All active tasks completed")
# TODO use checkpoint engine for rollout clear_kv_cache
# print("[FullyAsyncRollouter][Public][Pause] clear kv cache")
# # Always clear KV cache to release GPU memory during weight synchronization,
# # regardless of partial_rollout setting.
# await self.async_rollout_manager.clear_kv_cache()
self.monitor_loop_trigger = False
async def resume(self, dependency_ref: ObjectRef = None):
if dependency_ref is not None:
ray.get(dependency_ref)
print("[FullyAsyncRollouter][Public][Resume]")
async with self.lock:
if self.config.async_training.partial_rollout:
await self.async_rollout_manager.resume()
self.paused = False
self.monitor_loop_trigger = True
self.condition.notify_all()
async def get_statistics(self) -> dict:
queue_stats = self.message_queue_client.get_statistics_sync()
stats = {
# monitor stats
"monitor/active_tasks_size": len(self.active_tasks),
"monitor/queue/pending_queue_size": self.pending_queue.qsize(),
"monitor/queue/cancel_queue_size": self.cancel_queue.qsize(),
"monitor/queue/mq_queue_size": queue_stats["queue_size"],
# counting stats
"count/current_param_version": self.current_param_version,
"count/total_generated_samples": self.total_generated_samples,
"count/staleness_samples": self.staleness_samples,
"count/dropped_stale_samples": self.dropped_stale_samples,
# static stats
"static/max_required_samples": self.max_required_samples,
"static/required_samples": self.required_samples,
"static/staleness_threshold": self.staleness_threshold,
"static/max_queue_size": self.max_queue_size,
"static/max_concurrent_samples": self.max_concurrent_samples,
}
return stats
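# ---------------------------------------------------------------------------
# Illustrative lifecycle sketch (comments only, not executed): assuming
# `rollouter` is a Ray actor handle for this class that has already been wired
# to a MessageQueueClient, a driver-side flow might look like:
#
#     rollouter.fit.remote()                              # start feed + processor loops
#     ray.get(rollouter.pause.remote())                   # before a weight sync
#     ...                                                 # synchronize weights
#     ray.get(rollouter.resume.remote())                  # continue generation
#     stats = ray.get(rollouter.get_statistics.remote())  # periodic monitoring
# ---------------------------------------------------------------------------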
|
verl__experimental__fully_async_policy__fully_async_rollouter.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from datetime import datetime
from pprint import pprint
from typing import Any
import ray
from tqdm import tqdm
from verl import DataProto
from verl.experimental.fully_async_policy.detach_utils import (
MetricsAggregator,
ValidateMetrics,
assemble_batch_from_rollout_samples,
)
from verl.experimental.fully_async_policy.message_queue import MessageQueueClient
from verl.experimental.separation.ray_trainer import SeparateRayPPOTrainer
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role, WorkerType, need_critic, need_reference_policy, need_reward_model
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, should_save_ckpt_esi
from verl.utils.debug import marked_timer
class TrainingStopException(Exception):
"""Exception raised to signal training should stop"""
pass
@ray.remote(num_cpus=10)
class FullyAsyncTrainer(SeparateRayPPOTrainer):
"""
A fully asynchronous PPO trainer that obtains samples from a MessageQueue for training.
Based on an improved implementation of OneStepOffRayTrainer
"""
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup,
processor=None,
device_name=None,
):
# ==================== RayPPOTrainer config ====================
# Store the tokenizer for text processing
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
assert not self.hybrid_engine
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = need_reference_policy(self.config)
self.use_rm = need_reward_model(self.config)
self.use_critic = need_critic(self.config)
self.ray_worker_group_cls = ray_worker_group_cls
self.device_name = device_name if device_name else self.config.trainer.device
        # if ref_in_actor is True, the reference policy will be the actor without LoRA applied
lora_rank = config.actor_rollout_ref.model.get("lora", {}).get("rank", 0)
if lora_rank <= 0:
lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
self.ref_in_actor = lora_rank > 0 or config.actor_rollout_ref.model.get("lora_adapter_path") is not None
# define in-reward KL control
        # kl loss control currently not supported
if self.config.algorithm.use_kl_in_reward:
self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl)
self.use_prefix_grouper = self.config.actor_rollout_ref.actor.get("use_prefix_grouper", False)
self.use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
# ==================== SeparateRayPPOTrainer config ====================
self.global_steps = 0
self.epoch = 0
self.max_steps_duration = 0
self.progress_bar = None
self.logger = None
self.is_last_step = False
self.prev_step_profile = False
self.curr_step_profile = False
self.next_step_profile = False
self.last_val_metrics = {}
self.metrics = {}
self.timing_raw = {}
# reward message
self.future_reward = None
self.reward_tensor = None
self.reward_extra_infos_dict = {}
# ==================== fully async config ====================
self.message_queue_client = None
self.param_synchronizer = None
# Statistics
# we start from step 1
self.global_steps = 1
self.local_trigger_step = 1
self.processed_samples = 0
self.stale_samples_processed = 0
self.stale_trajectory_processed = 0
self.current_param_version = 0
self.total_train_steps = None
self.progress_bar = None
self.trigger_parameter_sync_step = config.async_training.trigger_parameter_sync_step
self.last_ckpt_version = 0
self.train_val_metrics = None
self.train_role = Role.ActorRollout if config.async_training.use_trainer_do_validate else Role.Actor
        # required_samples uses ppo_mini_batch_size * require_batches as the minimum number of samples.
self.require_batches = config.async_training.require_batches
self.required_samples = config.actor_rollout_ref.actor.ppo_mini_batch_size * self.require_batches
total_gpus = (
config.trainer.nnodes * config.trainer.n_gpus_per_node
+ config.rollout.nnodes * config.rollout.n_gpus_per_node
)
self.metrics_aggregator = MetricsAggregator(total_gpus=total_gpus)
# use trainer to do validation
if self.config.async_training.use_trainer_do_validate:
from verl.trainer.main_ppo import create_rl_dataset
from verl.utils.dataset.rl_dataset import collate_fn
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor)
rollout_gpus = config.rollout.nnodes * config.rollout.n_gpus_per_node
print(f"[FullyAsyncTrainer] split before val_dataset total len: {len(val_dataset)}")
split_dataset = val_dataset.split(total_gpus)
rollout_val_dataset0 = split_dataset[rollout_gpus:]
from torch.utils.data import ConcatDataset
val_dataset = ConcatDataset(rollout_val_dataset0)
print(f"[FullyAsyncTrainer] split after val_dataset total len: {len(val_dataset)}")
self.val_dataset = val_dataset
# update val_dataloader
val_batch_size = self.config.data.val_batch_size # Prefer config value if set
if val_batch_size is None:
val_batch_size = len(val_dataset)
from torchdata.stateful_dataloader import StatefulDataLoader
print(f"[FullyAsyncTrainer] create val_dataloader with batch_size: {val_batch_size}")
self.val_dataloader = StatefulDataLoader(
dataset=val_dataset,
batch_size=val_batch_size,
num_workers=self.config.data["dataloader_num_workers"],
shuffle=self.config.data.get("validation_shuffle", True),
drop_last=False,
collate_fn=collate_fn,
)
def set_message_queue_client(self, message_queue_client: MessageQueueClient):
"""Set message queue client"""
self.message_queue_client = message_queue_client
def set_parameter_synchronizer(self, param_synchronizer):
"""Set parameter synchronizer"""
self.param_synchronizer = param_synchronizer
def set_total_train_steps(self, total_train_steps):
self.total_train_steps = total_train_steps
self.progress_bar = tqdm(total=self.total_train_steps, initial=0, desc="Training Progress")
def get_actor_wg(self):
"""Get actor worker group"""
return self.actor_wg
def _get_samples_from_queue(self) -> tuple[None, None] | tuple[int, Any]:
"""
        Get samples from the message queue and assemble them into a training batch.
        Uses a loop to continuously collect samples until enough are gathered.
        Returns:
            tuple: (epoch, batch), or (None, None) if not enough samples were collected
"""
print(
f"[FullyAsyncTrainer] Requesting {self.required_samples} samples from queue",
flush=True,
)
# Collect samples using a simple loop calling get_sample
consumer_start = time.time()
queue_samples = []
queue_len = 0
while len(queue_samples) < self.required_samples:
# Get a single sample and wait until there is a sample or None is received
sample, queue_len = self.message_queue_client.get_sample_sync()
if sample is None:
print(
f"[FullyAsyncTrainer] Detected termination signal (None), stopping sample collection. "
f"Collected {len(queue_samples)}/{self.required_samples} samples"
)
break
queue_samples.append(sample)
if len(queue_samples) % 64 == 0:
print(
f"[FullyAsyncTrainer] Collected {len(queue_samples)}/{self.required_samples} samples. "
f"mq_len: {queue_len}"
)
consumer_end = time.time()
if not queue_samples or len(queue_samples) < self.required_samples:
print("[FullyAsyncTrainer] not enough samples collected after loop")
return None, None
total_wait_time = consumer_end - consumer_start
print(
f"[FullyAsyncTrainer] Loop collection completed: {len(queue_samples)}/{self.required_samples} samples, "
f"total wait time: {total_wait_time:.2f} seconds."
f"mq_len: {queue_len}"
)
queue_samples = [ray.cloudpickle.loads(x) for x in queue_samples]
# Assemble batch - now working directly with RolloutSample objects
if self.config.trainer.balance_batch:
batch = assemble_batch_from_rollout_samples(queue_samples, self.tokenizer, self.config, self._balance_batch)
else:
batch = assemble_batch_from_rollout_samples(queue_samples, self.tokenizer, self.config, None)
batch.meta_info["fully_async/total_wait_time"] = total_wait_time
return 0, batch
def _create_actor_rollout_classes(self):
# create actor
for role in [self.train_role]:
resource_pool = self.resource_pool_manager.get_resource_pool(role)
role_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[role],
config=self.config.actor_rollout_ref,
role=str(role),
)
self.resource_pool_to_cls[resource_pool][str(role)] = role_cls
def _init_models(self):
if self.use_critic:
self.critic_wg = self.all_wg[str(Role.Critic)]
self.critic_wg.init_model()
if self.use_reference_policy and not self.ref_in_actor:
self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)]
self.ref_policy_wg.init_model()
if self.use_rm:
self.rm_wg = self.all_wg[str(Role.RewardModel)]
self.rm_wg.init_model()
self.actor_wg = self.all_wg[str(self.train_role)]
self.actor_wg.init_model()
self.actor_rollout_wg = self.actor_wg # to be compatible with the functions that not be modified
async def init_workers(self):
"""Initialize distributed training workers using Ray backend.
Creates:
1. Ray resource pools from configuration
2. Worker groups for each role (actor, critic, etc.)
"""
# self._init_async_objects()
self._init_resource_pools()
self._create_worker_classes()
self._init_worker_groups()
self._init_models()
await self._init_async_rollout_manager()
async def _init_async_rollout_manager(self):
# use async rollout do validate
print(f"[FullyAsyncTrainer] use_trainer_do_validate: {self.config.async_training.use_trainer_do_validate}")
if self.config.async_training.use_trainer_do_validate:
print("[FullyAsyncTrainer] Init async rollout manager")
# infrastructure overview: https://verl.readthedocs.io/en/latest/advance/reward_loop.html#architecture-design
# agent_reward_loop: streaming reward computation with actor rollout
            # enabled when either condition holds: (1) no reward model, or (2) reward model with an extra resource pool
enable_agent_reward_loop = not self.use_rm or self.config.reward.reward_model.enable_resource_pool
# if enable_agent_reward_loop, we directly pass reward_loop_workers to agent loop manager
# to stream reward computation with actor rollout
reward_loop_worker_handles = (
self.reward_loop_manager.reward_loop_workers if enable_agent_reward_loop else None
)
# create async rollout manager and request scheduler
assert self.config.actor_rollout_ref.rollout.mode == "async"
from verl.experimental.fully_async_policy.agent_loop import FullyAsyncAgentLoopManager
self.async_rollout_mode = True
self.async_rollout_manager = await FullyAsyncAgentLoopManager.create(
config=self.config,
worker_group=self.actor_rollout_wg,
reward_loop_worker_handles=reward_loop_worker_handles,
)
print("[FullyAsyncTrainer] async_rollout_manager sleep")
await self.async_rollout_manager.sleep()
else:
print("[FullyAsyncTrainer] Skip async rollout manager (use_trainer_do_validate=False)")
async def fit(self):
"""
The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
to construct the PPO dataflow.
The light-weight advantage computation is done on the driver process.
"""
print("[FullyAsyncTrainer] Starting FullyAsyncTrainer...")
if self.message_queue_client is None:
raise ValueError("MessageQueue client not set. Call set_message_queue_client() first.")
if self.param_synchronizer is None:
raise ValueError("param_synchronizer client not set. Call set_parameter_synchronizer() first.")
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
self.logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.max_steps_duration = 0
        # log validation data produced before training
self._log_validation_data()
# Use queue mode, no need for traditional dataloader iterator
# Initialize to get the first batch of data
while True:
try:
await self.fit_step()
except TrainingStopException:
print("[FullyAsyncTrainer] Training stopped by queue termination signal")
break
# final parameter sync and validate
        # 1. wait for the remaining validate task
ray.get(self.param_synchronizer.wait_last_valid.remote())
self._log_validation_data()
        # 2. perform an additional parameter sync and validation if the trainer has already been updated
if self.current_param_version % self.config.rollout.test_freq != 0 or self.local_trigger_step > 1:
            await self._trigger_parameter_sync_after_step(validate=True)
ray.get(self.param_synchronizer.wait_last_valid.remote())
self._log_validation_data()
self.progress_bar.close()
self._fit_save_checkpoint()
async def fit_step(self, batch_dict: dict = None):
"""
Single-step training template method. Handles all logic for one training step.
Flow:
1. Pre-step processing -> 2. Get batch -> 3. Generate sequences ->
        4. Compute reward -> 5. Compute log_prob -> 6. Compute ref_log_prob ->
7. Compute advantage -> 8. Update critic -> 9. Update actor -> 10. Post-step processing
Args:
batch_dict: Raw data dictionary
"""
print("[FullyAsyncTrainer] fit_step")
self.metrics = {"training/global_step": self.global_steps, "training/epoch": self.epoch}
self.timing_raw = {}
# reward message
self.future_reward = None
self.reward_tensor = None
self.reward_extra_infos_dict = {}
# self._fit_prepare_step()
self._fit_start_profile()
with marked_timer("step", self.timing_raw):
batch = self._fit_generate(None)
batch = self._fit_compute_reward(batch)
batch = self._fit_compute_log_prob(batch)
batch = self._fit_compute_ref_log_prob(batch)
batch = self._fit_compute_critic(batch)
batch = self._fit_compute_advantage(batch)
batch = self._fit_update_critic(batch)
batch = self._fit_update_actor(batch)
await self._fit_update_weights()
self._fit_dump_data(batch)
# self._fit_validate()
self._fit_save_checkpoint()
self._fit_stop_profile()
self._fit_collect_metrics(batch)
self._fit_torch_memory()
# self._fit_experimental(batch)
self._fit_postprocess_step()
def _fit_generate(self, batch: DataProto = None) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
with marked_timer("gen", timing_raw, color="red"):
epoch, batch = self._get_samples_from_queue()
if batch is None:
raise TrainingStopException("Training terminated: queue returned None")
self._collect_metrics_from_samples(batch, metrics)
return batch
def _compute_old_log_prob(self, batch: DataProto):
"""
        If algorithm.rollout_correction.bypass_mode is False,
        use the model engine with the first-version model params to re-calculate old_log_prob.
        If local_trigger_step == 1, save a copy of the training engine's parameters to CPU
        for subsequent MIS use.
        If local_trigger_step == 2, 3, ..., restore the version-1 parameters to calculate old_log_prob,
        then restore the parameters of the current version (see the call-sequence sketch after this method).
"""
if self.local_trigger_step == 1:
self.actor_rollout_wg.save_model_to_cpu(1)
old_log_prob, old_log_prob_mfu = super()._compute_old_log_prob(batch)
else:
self.actor_rollout_wg.save_model_to_cpu(self.local_trigger_step)
self.actor_rollout_wg.restore_model_from_cpu(1)
old_log_prob, old_log_prob_mfu = super()._compute_old_log_prob(batch)
self.actor_rollout_wg.restore_model_from_cpu(self.local_trigger_step)
self.actor_rollout_wg.clear_cpu_model(self.local_trigger_step)
return old_log_prob, old_log_prob_mfu
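    # Descriptive sketch of the effective call sequence above (comments only):
    #   local_trigger_step == 1     : save_model_to_cpu(1) -> compute old_log_prob with current params
    #   local_trigger_step == k > 1 : save_model_to_cpu(k) -> restore_model_from_cpu(1)
    #                                 -> compute old_log_prob -> restore_model_from_cpu(k)
    #                                 -> clear_cpu_model(k)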
def _fit_collect_metrics(self, batch):
super()._fit_collect_metrics(batch)
self.metrics_aggregator.add_step_metrics(
metrics=self.metrics, sample_count=self.required_samples, timestamp=time.time()
)
self._log_validation_data()
async def _fit_update_weights(self):
# with marked_timer("update_weights", self.timing_raw, color="red"):
# self.checkpoint_manager.update_weights()
# Trigger parameter synchronization after training step
time_str = datetime.now().strftime("%H:%M:%S.%f")[:-3]
print(
f"[FullyAsyncTrainer] global_steps: {self.global_steps} "
f"local_trigger_step: {self.local_trigger_step} "
f"trigger_parameter_sync_step: {self.trigger_parameter_sync_step} "
f"{time_str}"
)
await self._trigger_parameter_sync_after_step()
def _fit_save_checkpoint(self):
timing_raw = self.timing_raw
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
self.current_param_version % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
# sleep replicas to avoid OOM during checkpoint saving
# self.checkpoint_manager.sleep_replicas()
self._save_checkpoint()
                # wake replicas again after checkpoint saving
# self.checkpoint_manager.update_weights()
def _fit_postprocess_step(self):
self.global_steps += 1
def _save_checkpoint(self):
        # Warning: Currently, to align the training process and metrics with the colocate setup,
        # we use current_param_version instead of the global step.
        # It can be logically aligned with the original self.global_steps of colocate
        # and is used for metrics and checkpoints, which means each parameter synchronization
        # from trainer to rollouter increases it by 1.
# path: given_path + `/global_step_{global_steps}` + `/actor`
local_global_step_folder = os.path.join(
self.config.trainer.default_local_dir, f"global_step_{self.current_param_version}"
)
print(f"[FullyAsyncTrainer] local_global_step_folder: {local_global_step_folder}")
actor_local_path = os.path.join(local_global_step_folder, "actor")
actor_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(
self.config.trainer.default_hdfs_dir, f"global_step_{self.current_param_version}", "actor"
)
)
remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False)
if remove_previous_ckpt_in_save:
print(
"[FullyAsyncTrainer] Warning: remove_previous_ckpt_in_save is deprecated,"
+ " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead"
)
max_actor_ckpt_to_keep = (
self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
max_critic_ckpt_to_keep = (
self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
self.actor_rollout_wg.save_checkpoint(
actor_local_path, actor_remote_path, self.current_param_version, max_ckpt_to_keep=max_actor_ckpt_to_keep
)
if self.use_critic:
critic_local_path = os.path.join(local_global_step_folder, str(Role.Critic))
critic_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(
self.config.trainer.default_hdfs_dir, f"global_step_{self.current_param_version}", str(Role.Critic)
)
)
self.critic_wg.save_checkpoint(
critic_local_path,
critic_remote_path,
self.current_param_version,
max_ckpt_to_keep=max_critic_ckpt_to_keep,
)
ray.get(self.param_synchronizer.rollouter_save_checkpoint.remote(local_global_step_folder))
# latest checkpointed iteration tracker (for atomic usage)
local_latest_checkpointed_iteration = os.path.join(
self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
)
with open(local_latest_checkpointed_iteration, "w") as f:
f.write(str(self.current_param_version))
def load_checkpoint(self):
if self.config.trainer.resume_mode == "disable":
# NOTE: while there is no checkpoint to load, we still need to offload the model and optimizer to CPU
self.actor_rollout_wg.load_checkpoint(None)
return 0
# load from hdfs
if self.config.trainer.default_hdfs_dir is not None:
raise NotImplementedError("load from hdfs is not implemented yet")
else:
checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path
if not os.path.isabs(checkpoint_folder):
working_dir = os.getcwd()
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest
# find global_step_folder
if self.config.trainer.resume_mode == "auto":
if global_step_folder is None:
print("[FullyAsyncTrainer] Training from scratch")
self.actor_rollout_wg.load_checkpoint(None)
return 0
else:
if self.config.trainer.resume_mode == "resume_path":
assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type"
assert "global_step_" in self.config.trainer.resume_from_path, (
"resume ckpt must specify the global_steps"
)
global_step_folder = self.config.trainer.resume_from_path
if not os.path.isabs(global_step_folder):
working_dir = os.getcwd()
global_step_folder = os.path.join(working_dir, global_step_folder)
print(f"[FullyAsyncTrainer] Load from checkpoint folder: {global_step_folder}")
# set global step
self.current_param_version = int(global_step_folder.split("global_step_")[-1])
self.global_steps = self.current_param_version * self.trigger_parameter_sync_step + 1
self.last_ckpt_version = self.current_param_version
print(
f"[FullyAsyncTrainer] Setting global step to {self.global_steps}, "
f"current_param_version to {self.current_param_version}"
)
print(f"[FullyAsyncTrainer] Resuming from {global_step_folder}")
actor_path = os.path.join(global_step_folder, "actor")
critic_path = os.path.join(global_step_folder, str(Role.Critic))
# load actor
self.actor_rollout_wg.load_checkpoint(
actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
# load critic
if self.use_critic:
self.critic_wg.load_checkpoint(
critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
return self.current_param_version
def _collect_metrics_from_samples(self, batch, metrics):
"""
Collect metrics from samples
"""
if hasattr(batch, "meta_info") and batch.meta_info:
samples_param_versions = batch.meta_info["rollout_param_versions"]
stale_count = sum(1 for v in samples_param_versions if self.current_param_version - v >= 1)
self.stale_samples_processed += stale_count
trajectory_param_versions = batch.meta_info["trajectory_param_versions"]
stale_traj_count = sum(1 for v in trajectory_param_versions if self.current_param_version - v >= 1)
self.stale_trajectory_processed += stale_traj_count
metrics.update(
{
"fully_async/count/stale_samples_processed": self.stale_samples_processed,
"fully_async/count/stale_trajectory_processed": self.stale_trajectory_processed,
"fully_async/count/current_param_version": self.current_param_version,
}
)
for key, value in batch.meta_info.items():
if key.startswith("fully_async") or key.startswith("timing_s"):
metrics[key] = value
async def _trigger_parameter_sync_after_step(self, validate: bool = False):
"""
Trigger parameter synchronization after training step
This ensures rollouter always uses the latest trained parameters
"""
if self.local_trigger_step < self.trigger_parameter_sync_step and not validate:
self.local_trigger_step += 1
return
self.current_param_version += 1
self.local_trigger_step = 1
self.logger.log(
data=self.metrics_aggregator.get_aggregated_metrics(),
step=self.current_param_version,
)
self.progress_bar.update(1)
self.metrics_aggregator.reset()
timing_param_sync = {}
with marked_timer("timing_s/wait_last_valid", timing_param_sync):
ray.get(self.param_synchronizer.wait_last_valid.remote())
with marked_timer("timing_s/param_sync", timing_param_sync):
ray.get(
self.param_synchronizer.sync_weights.remote(
self.current_param_version,
validate=validate,
global_steps=self.global_steps,
use_trainer_do_validate=self.config.async_training.use_trainer_do_validate,
)
)
# do trainer validate
do_validate_param = (
self.config.rollout.test_freq > 0
and self.current_param_version % self.config.rollout.test_freq == 0
and self.current_param_version > 0
)
print(f"do_validate_param: {do_validate_param}")
if do_validate_param and self.config.async_training.use_trainer_do_validate:
print(f"[FullyAsyncTrainer] validate param version: {self.current_param_version}")
await self._validate_process()
else:
self.train_val_metrics = None
self.logger.log(data=timing_param_sync, step=self.current_param_version)
def _log_validation_data(self):
"""
Log validation data
"""
val_data = self.message_queue_client.get_validate_sync()
if not val_data:
return
val_metrics: ValidateMetrics = ray.cloudpickle.loads(val_data)
if self.train_val_metrics and self.config.async_training.use_trainer_do_validate:
# merge info
timing_param_sync = {}
with marked_timer("timing_s/merge_val", timing_param_sync):
new_metrics = self._merge_validation_results(self.train_val_metrics, val_metrics.metrics)
if new_metrics:
self.logger.log(data=new_metrics, step=val_metrics.param_version)
pprint(
f"[FullyAsyncTrainer] parameter version: {val_metrics.param_version} "
f"Validation metrics: {new_metrics}, timing_param_sync: {timing_param_sync['timing_s/merge_val']}"
)
self.logger.log(data=val_metrics.timing_raw, step=val_metrics.param_version)
else:
if val_metrics.metrics:
self.logger.log(data=val_metrics.metrics, step=val_metrics.param_version)
pprint(
f"[FullyAsyncTrainer] parameter version: {val_metrics.param_version} "
f"Validation metrics: {val_metrics.metrics}"
)
self.logger.log(data=val_metrics.timing_raw, step=val_metrics.param_version)
async def _validate_process(self):
if self.config.async_training.use_trainer_do_validate:
print("[FullyAsyncTrainer] _validate_process")
from verl.utils.profiler import marked_timer
timing_raw = {}
await self.async_rollout_manager.wake_up()
with marked_timer("trainer/validate_time", timing_raw):
self.train_val_metrics = self._validate(True)
await self.async_rollout_manager.sleep()
print(f"[FullyAsyncTrainer] validate timing_raw validate: {timing_raw['trainer/validate_time']}")
else:
self.train_val_metrics = None
print("[FullyAsyncTrainer] _validate_process without async_rollout_manager")
|
verl__experimental__fully_async_policy__fully_async_trainer.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core.distributed import DistributedDataParallel as DDP
@torch.no_grad()
def copy_megatron_model_to_cpu(models):
"""
Copy Megatron model parameters to CPU memory (non-destructive copy).
Unlike offload_megatron_model_to_cpu which moves data, this function creates
independent copies on CPU while keeping GPU data intact.
Args:
models: List of model chunks (DDP-wrapped or unwrapped)
Returns:
dict: CPU state containing copied parameters and buffers
"""
cpu_state = {}
for model_idx, model_chunk in enumerate(models):
if isinstance(model_chunk, DDP):
# Handle DDP-wrapped models
model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
buffer_states = []
for buffers in model_chunk_all_buffers:
buffer_list = []
for buffer in buffers:
buffer_state = {}
# Copy parameter data to CPU
if buffer.param_data.storage().size() > 0:
buffer_state["param_data"] = buffer.param_data.data.cpu().clone().pin_memory()
buffer_list.append(buffer_state)
buffer_states.append(buffer_list)
cpu_state[f"model_chunk_{model_idx}"] = {"buffer_states": buffer_states, "is_ddp": True}
else:
# Handle non-DDP models (ref module)
model_state = {}
for name, param in model_chunk.named_parameters():
param_state = {"data": param.data.cpu().clone().pin_memory()}
model_state[name] = param_state
cpu_state[f"model_chunk_{model_idx}"] = {"model_state": model_state, "is_ddp": False}
return cpu_state
@torch.no_grad()
def restore_megatron_model_from_cpu(models, cpu_state):
"""
Restore Megatron model parameters from CPU memory back to GPU.
Args:
models: List of model chunks to restore to
cpu_state: CPU state dict returned from copy_megatron_model_to_cpu
"""
for model_idx, model_chunk in enumerate(models):
chunk_key = f"model_chunk_{model_idx}"
if chunk_key not in cpu_state:
continue
chunk_state = cpu_state[chunk_key]
if chunk_state["is_ddp"] and isinstance(model_chunk, DDP):
# Restore DDP buffers
model_chunk_all_buffers = [model_chunk.buffers, model_chunk.expert_parallel_buffers]
buffer_states = chunk_state["buffer_states"]
for buffers, buffer_list in zip(model_chunk_all_buffers, buffer_states, strict=False):
for buffer, buffer_state in zip(buffers, buffer_list, strict=False):
# Restore parameter data
if "param_data" in buffer_state:
buffer.param_data.data.copy_(buffer_state["param_data"].to(buffer.param_data.device))
elif not chunk_state["is_ddp"] and not isinstance(model_chunk, DDP):
# Restore non-DDP models
model_state = chunk_state["model_state"]
for name, param in model_chunk.named_parameters():
if name in model_state:
param_state = model_state[name]
param.data.copy_(param_state["data"].to(param.device))
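# ---------------------------------------------------------------------------
# Usage sketch (comments only; `models` stands for a list of Megatron model
# chunks, as passed in by the worker code, and is not defined in this module):
#
#     cpu_state = copy_megatron_model_to_cpu(models)       # non-destructive CPU snapshot
#     ...                                                   # overwrite GPU params, e.g. restore another version
#     restore_megatron_model_from_cpu(models, cpu_state)    # copy the snapshot back to GPU
# ---------------------------------------------------------------------------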
|
verl__experimental__fully_async_policy__megatron_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
# Copyright 2025 NVIDIA Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import torch
import torch.distributed
from omegaconf import DictConfig
from verl.experimental.fully_async_policy.base_detach_sync import BaseDetachNcclSync
from verl.experimental.fully_async_policy.megatron_utils import (
copy_megatron_model_to_cpu,
restore_megatron_model_from_cpu,
)
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
get_torch_device,
)
from verl.utils.megatron_utils import load_megatron_model_to_gpu, offload_megatron_model_to_cpu, per_tensor_generator
from verl.workers.megatron_workers import AsyncActorRolloutRefWorker, CriticWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
__all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"]
class DetachNcclSync(BaseDetachNcclSync, AsyncActorRolloutRefWorker):
def __init__(self, config: DictConfig, role: str):
BaseDetachNcclSync.__init__(self, config, role)
AsyncActorRolloutRefWorker.__init__(self, config, role)
def _get_actor_params(self):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self, sync_group_name="actor_rollout"):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
if self._is_actor and self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module, False)
params_generator = self._get_actor_params_generator() if self._is_actor else None
params = {key: tensor for key, tensor in params_generator} if params_generator is not None else None
rollout_name = self.config.rollout.name
inference_model = None
if self._is_rollout and (not self._is_actor):
if rollout_name == "vllm":
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
if inference_model is None:
print("[sync_rollout_weights] Initialize server adapter engine")
async def init_engine():
if hasattr(self.rollout, "_init_server_adapter"):
await self.rollout._init_server_adapter()
else:
print("[sync_rollout_weights] No _init_server_adapter method found")
return self.rollout._engine
inference_model = self._run_async_safely(init_engine())
# For ServerAdapter, only TP rank 0 initializes the engine
# TP rank != 0 can safely have inference_model as None
from verl.workers.rollout.sglang_rollout.sglang_rollout import ServerAdapter
is_server_adapter = isinstance(self.rollout, ServerAdapter)
is_non_tp_rank = False
if (
is_server_adapter
and hasattr(self.rollout, "device_mesh")
and self.rollout.device_mesh is not None
):
try:
is_non_tp_rank = self.rollout.device_mesh["infer_tp"].get_local_rank() != 0
except Exception:
pass
if inference_model is None and not (is_server_adapter and is_non_tp_rank):
raise RuntimeError(
f"Failed to initialize rollout engine. "
f"rollout type: {type(self.rollout)}, "
f"has _init_server_adapter: {hasattr(self.rollout, '_init_server_adapter')}"
)
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
if rollout_name == "sglang" and self._is_rollout:
self._sync_sglang_weights(inference_model, params, sync_group_name)
else:
self._sync_vllm_weights(inference_model, params, sync_group_name)
if self._is_actor and self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
get_torch_device().empty_cache()
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_model_to_cpu(self, n):
if not hasattr(self, "cpu_saved_models"):
self.cpu_saved_models = {}
self.cpu_saved_models[n] = copy_megatron_model_to_cpu(self.actor.actor_module)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def restore_model_from_cpu(self, n):
if n in self.cpu_saved_models:
restore_megatron_model_from_cpu(self.actor.actor_module, self.cpu_saved_models[n])
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def clear_cpu_model(self, n):
if n in self.cpu_saved_models:
del self.cpu_saved_models[n]
def cache_actor_weights_to_cpu(self):
self.cpu_named_params = {}
if self._is_actor:
params_generator = self._get_actor_params_generator()
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
print(f"cache_actor_weights_to_cpu, local_rank:{local_rank}, world_size:{world_size}")
for tensor_idx, (key, tensor) in enumerate(params_generator):
if tensor_idx % world_size == local_rank:
self.cpu_named_params[key] = tensor.to("cpu", non_blocking=True)
get_torch_device().synchronize()
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights_by_checkpoint(self, sync_group_name="actor_rollout"):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
# Load model to GPU
load_start_time = time.time()
if self._is_actor and self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module, False)
load_duration = time.time() - load_start_time
from ray.util.collective import collective
# Cache actor weights to CPU and measure the time taken
cache_start_time = time.time()
self.cache_actor_weights_to_cpu()
cache_end_time = time.time()
cache_duration = cache_end_time - cache_start_time
# Register the cached weights into the checkpoint engine
self.checkpoint_engine.register_checkpoint(self._weights_info, self.cpu_named_params)
register_end_time = time.time()
register_duration = register_end_time - cache_end_time
self.cpu_named_params = {}
collective.barrier(group_name=sync_group_name)
update_start_time = time.time()
rollout_name = self.config.rollout.name
inference_model = None
if self._is_rollout and (not self._is_actor):
if rollout_name == "vllm":
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
# For ServerAdapter, _engine might be None and needs async initialization
if inference_model is None:
# Initialize the server adapter engine
print("[sync_rollout_weights] Initialize server adapter engine")
async def init_engine():
if hasattr(self.rollout, "_init_server_adapter"):
await self.rollout._init_server_adapter()
else:
print("[sync_rollout_weights] No _init_server_adapter method found")
return self.rollout._engine
inference_model = self._run_async_safely(init_engine())
# For ServerAdapter, only TP rank 0 initializes the engine
# TP rank != 0 can safely have inference_model as None
from verl.workers.rollout.sglang_rollout.sglang_rollout import ServerAdapter
is_server_adapter = isinstance(self.rollout, ServerAdapter)
is_non_tp_rank = False
if (
is_server_adapter
and hasattr(self.rollout, "device_mesh")
and self.rollout.device_mesh is not None
):
try:
is_non_tp_rank = self.rollout.device_mesh["infer_tp"].get_local_rank() != 0
except Exception:
pass
if inference_model is None and not (is_server_adapter and is_non_tp_rank):
raise RuntimeError(
f"Failed to initialize rollout engine. "
f"rollout type: {type(self.rollout)}, "
f"has _init_server_adapter: {hasattr(self.rollout, '_init_server_adapter')}"
)
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
# Update the checkpoint with the inference model and broadcast weights
self.checkpoint_engine.update_checkpoint(
inference_model=inference_model,
group_name=sync_group_name,
overlap_broadcast_and_consume=self.config.checkpoint_engine.overlap_broadcast_and_consume,
)
update_end_time = time.time()
update_duration = update_end_time - update_start_time
collective.barrier(group_name=sync_group_name)
offload_start_time = time.time()
if self._is_actor and self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
offload_duration = time.time() - offload_start_time
print(
f"sync_rollout_weights_by_checkpoint finish!, rank:{torch.distributed.get_rank()},"
f" is_actor:{self._is_actor}, is_rollout:{self._is_rollout},"
f" total cost:{update_end_time - cache_start_time} seconds, while cache cost {cache_duration} seconds, "
f" register cost {register_duration} seconds, update cost {update_duration} seconds"
)
if self._is_actor and self._is_offload_param:
print(
f"sync_rollout_weights_by_checkpoint load model to gpu cost {load_duration} seconds,"
f" offload model to cpu cost {offload_duration} seconds"
)
class DetachActorWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print("[DetachAsyncRolloutWorker] Initializing via DetachNcclSync...")
DetachNcclSync.__init__(self, config, role)
def _get_actor_params_generator(self):
assert self._is_actor
if self.bridge is not None:
if self.vanilla_bridge:
generator = self.bridge.export_weights(self.actor.actor_module)
else:
generator = self.bridge.export_hf_weights(self.actor.actor_module)
else:
generator = per_tensor_generator(
self.actor.actor_module,
self.actor_model_config,
self.weight_converter,
self.tf_config,
self.layer_name_mapping,
)
return generator
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_actor_weights_info(self):
assert self._is_actor
if hasattr(self, "_weights_info"):
return self._weights_info
if self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module, False)
params_generator = self._get_actor_params_generator()
ret = []
for key, tensor in params_generator:
ret.append((key, tensor.size(), tensor.dtype))
self._weights_info = ret
        # This function is only called once at startup, and sync_rollout_weights
        # is called immediately afterwards, so we do not offload the model here.
return ret
class DetachAsyncRolloutWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}")
DetachNcclSync.__init__(self, config, role)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def set_actor_weights_info(self, weights_info):
assert self._is_rollout
self._weights_info = weights_info
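# ---------------------------------------------------------------------------
# Handshake sketch (comments only): the parameter synchronizer first exchanges
# weight metadata between the two worker groups, then drives the transfer.
# `actor_wg` and `rollout_wg` are worker-group handles created elsewhere.
#
#     weights_info = actor_wg.get_actor_weights_info()[0]
#     rollout_wg.set_actor_weights_info(weights_info)
#     actor_wg.sync_rollout_weights("actor_rollout")
#     ray.get(rollout_wg.sync_rollout_weights("actor_rollout"))
# ---------------------------------------------------------------------------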
|
verl__experimental__fully_async_policy__megatron_worker.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from collections import deque
from typing import Any
import ray
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
@ray.remote(num_cpus=2, max_concurrency=20)
class MessageQueue:
"""
Simplified Ray-based asynchronous message queue for communication between Rollouter and Trainer
"""
def __init__(self, config: DictConfig, max_queue_size: int = 1000):
self.config = config
if max_queue_size is None:
raise ValueError(f"max_queue_size cannot be None, got: {max_queue_size}")
self.max_queue_size = int(max_queue_size)
self.queue = deque(maxlen=self.max_queue_size)
self.current_param_version = 0
self.val_queue = deque()
try:
if hasattr(config, "async_training") and config.async_training is not None:
self.staleness_threshold = getattr(config.async_training, "staleness_threshold", 3)
else:
self.staleness_threshold = 3
except (AttributeError, RecursionError):
self.staleness_threshold = 3
# Asyncio for message handling
self.running = True
# async safe
self._lock = asyncio.Lock()
self._consumer_condition = asyncio.Condition(self._lock)
# statistic message
self.total_produced = 0
self.total_consumed = 0
self.dropped_samples = 0
print(
f"[MessageQueue] initialized with max_queue_size={max_queue_size}, "
f"staleness_threshold={self.staleness_threshold}"
)
async def put_sample(self, sample: Any, param_version: int) -> bool:
"""
Put a batch sample into the queue
Args:
sample: Sample data
param_version: Parameter version number
Returns:
bool: Whether the sample was successfully put into the queue
"""
async with self._lock:
# If queue is full, remove the oldest sample (rarely happens)
is_drop = False
if len(self.queue) >= self.max_queue_size:
self.queue.popleft()
self.dropped_samples += 1
is_drop = True
logger.warning("Queue full, dropped sample")
self.queue.append(sample)
self.total_produced += 1
# Notify waiting consumers
self._consumer_condition.notify_all()
if self.total_produced % 100 == 0:
print(f"MessageQueue stats: produced={self.total_produced}, queue_size={len(self.queue)}")
if is_drop:
return False
return True
async def get_sample(self) -> Any | None:
"""
Get a single sample from the queue, wait until one is available
Returns:
            tuple: (sample, remaining queue length), or None if the queue is closed and empty
"""
async with self._lock:
while len(self.queue) == 0 and self.running:
await self._consumer_condition.wait()
# If queue is closed and empty, return None
if not self.running and len(self.queue) == 0:
return None
# Get one sample
data = self.queue.popleft()
self.total_consumed += 1
return data, len(self.queue)
async def update_param_version(self, version: int):
"""Update current parameter version"""
async with self._lock:
old_version = self.current_param_version
self.current_param_version = version
print(f"Parameter version updated from {old_version} to {version}")
async def get_queue_size(self) -> int:
"""Get current queue length"""
async with self._lock:
return len(self.queue)
async def get_statistics(self) -> dict[str, Any]:
"""Get queue statistics"""
async with self._lock:
return {
"queue_size": len(self.queue),
"total_produced": self.total_produced,
"total_consumed": self.total_consumed,
"dropped_samples": self.dropped_samples,
"current_param_version": self.current_param_version,
"staleness_threshold": self.staleness_threshold,
"max_queue_size": self.max_queue_size,
}
async def clear_queue(self):
"""Clear the queue"""
async with self._lock:
cleared_count = len(self.queue)
self.queue.clear()
logger.info(f"Cleared {cleared_count} samples from queue")
async def shutdown(self):
"""Shutdown the message queue"""
async with self._lock:
self.running = False
# Notify all waiting coroutines so they can exit
self._consumer_condition.notify_all()
logger.info("MessageQueue shutdown")
async def get_memory_usage(self) -> dict:
"""Get memory usage statistics"""
async with self._lock:
# Estimate memory usage of samples in queue
import sys
total_size = 0
sample_count = len(self.queue)
if sample_count > 0:
# Estimate size of a single sample (simplified estimation)
sample = list(self.queue)[0]
try:
sample_size = sys.getsizeof(sample)
# Since we now store RolloutSample directly, estimate based on its components
if hasattr(sample, "original_batch_dict") and sample.original_batch_dict:
# Estimate batch data size
batch_data = sample.original_batch_dict.get("batch", {})
sample_size += len(batch_data) * 1000 # Roughly estimate 1KB per batch entry
if hasattr(sample, "agent_loop_output"):
# Estimate AgentLoopOutput size
sample_size += 5000 # Roughly estimate 5KB for AgentLoopOutput
total_size = sample_size * sample_count
except Exception:
total_size = sample_count * 15000 # Roughly estimate 15KB per RolloutSample
return {
"queue_samples": sample_count,
"estimated_memory_bytes": total_size,
"estimated_memory_mb": total_size / (1024 * 1024),
}
async def put_validate(self, data):
async with self._lock:
self.val_queue.append(data)
async def get_validate(self):
async with self._lock:
if self.val_queue:
return self.val_queue.popleft()
else:
return None
class MessageQueueClient:
"""Asyncio-compatible MessageQueue client for communicating with MessageQueue Actor"""
def __init__(self, queue_actor: Any):
self.queue_actor = queue_actor
async def put_sample(self, sample: Any, param_version: int) -> bool:
"""Put batch into queue (async)"""
future = self.queue_actor.put_sample.remote(sample, param_version)
return await asyncio.wrap_future(future.future())
async def put_validate(self, data: Any) -> bool:
future = self.queue_actor.put_validate.remote(data)
return await asyncio.wrap_future(future.future())
def get_validate_sync(self) -> Any | None:
return ray.get(self.queue_actor.get_validate.remote())
async def get_sample(self) -> Any | None:
"""Get single sample from queue, wait until one is available (async)"""
future = self.queue_actor.get_sample.remote()
return await asyncio.wrap_future(future.future())
async def get_queue_size(self) -> int:
"""Get queue size (async)"""
future = self.queue_actor.get_queue_size.remote()
return await asyncio.wrap_future(future.future())
async def get_statistics(self) -> dict[str, Any]:
"""Get statistics (async)"""
future = self.queue_actor.get_statistics.remote()
return await asyncio.wrap_future(future.future())
async def clear_queue(self):
"""Clear queue (async)"""
future = self.queue_actor.clear_queue.remote()
await asyncio.wrap_future(future.future())
async def shutdown(self):
"""Shutdown queue (async)"""
future = self.queue_actor.shutdown.remote()
await asyncio.wrap_future(future.future())
async def get_memory_usage(self) -> dict:
"""Get memory usage statistics (async)"""
future = self.queue_actor.get_memory_usage.remote()
return await asyncio.wrap_future(future.future())
# Synchronous version of the method (deprecated)
def put_sample_sync(self, sample: Any, param_version: int) -> bool:
"""Put batch into queue (sync - deprecated, use put_sample instead)"""
return ray.get(self.queue_actor.put_sample.remote(sample, param_version))
def get_sample_sync(self) -> Any | None:
"""Get single sample from queue (sync - deprecated, use get_sample instead)"""
return ray.get(self.queue_actor.get_sample.remote())
def get_statistics_sync(self) -> dict[str, Any]:
"""Get statistics (sync - deprecated, use get_statistics instead)"""
return ray.get(self.queue_actor.get_statistics.remote())
def update_param_version_sync(self, version: int):
"""Update parameter version (async)"""
return ray.get(self.queue_actor.update_param_version.remote(version))
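# ---------------------------------------------------------------------------
# Usage sketch (comments only; assumes an initialized Ray runtime and a
# DictConfig `cfg` with an `async_training` section; `payload` is any
# picklable sample):
#
#     queue = MessageQueue.remote(cfg, max_queue_size=1000)
#     client = MessageQueueClient(queue)
#     ok = await client.put_sample(sample=payload, param_version=0)   # inside a coroutine
#     result = client.get_sample_sync()   # (sample, queue_len), or None after shutdown
#     stats = client.get_statistics_sync()
# ---------------------------------------------------------------------------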
|
verl__experimental__fully_async_policy__message_queue.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import ray
from ray.util.collective import collective
from verl.utils.device import get_nccl_backend
logger = logging.getLogger(__name__)
@ray.remote
class ParameterSynchronizer:
"""
    Unified parameter synchronizer responsible for synchronizing model parameters between actor and rollout.
    Based on the synchronization scheme of one_step_off_policy;
    merges the functionality of the original separate synchronizer classes.
"""
def __init__(self, config, trainer, rollouter, mq):
self.config = config
self.trainer = trainer
self.rollouter = rollouter
self.mq_client = mq
self.actor_wg = ray.get(trainer.get_actor_wg.remote())
self.rollout_wg = ray.get(rollouter.get_rollout_wg.remote())
# Basic attributes
self.weights_info = None
self.sync_group_initialized = False
self.sync_group_name = "actor_rollout"
self.wait_last_update = None
self.wait_last_resume = None
self.validate_task = None
# Statistics
self.current_version = 0
self._init_weights_info()
self._init_sync_group()
if self.config.async_training.checkpoint_engine.enable:
self._init_actor_rollout_checkpoint_engine()
def get_current_param_version(self) -> int:
"""Get current parameter version number"""
return self.current_version
def get_weights_info(self):
"""Get weights info"""
return self.weights_info
def _init_weights_info(self):
self.weights_info = self.actor_wg.get_actor_weights_info()[0]
self.rollout_wg.set_actor_weights_info(self.weights_info)
def _init_sync_group(self):
print("[ParameterSynchronizer] Initializing parameter synchronization group...")
actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers
n_workers = len(self.actor_wg.workers + self.rollout_wg.workers)
if self.config.trainer.device == "npu":
master_address = ray.get(self.actor_wg.workers[0]._get_node_ip.remote()).strip("[]")
master_port = ray.get(self.actor_wg.workers[0]._get_free_port.remote())
self.actor_wg.create_weight_sync_group(
master_address,
master_port,
0,
n_workers,
)
ray.get(
self.rollout_wg.create_weight_sync_group(
master_address,
master_port,
len(self.actor_wg.workers),
n_workers,
)
)
else:
collective.create_collective_group(
actor_rollout_workers,
n_workers,
list(range(0, n_workers)),
backend=get_nccl_backend(),
group_name=self.sync_group_name,
)
def _init_actor_rollout_checkpoint_engine(self):
ray.get(
self.actor_wg.init_checkpoint_engine(
rank_offset=0,
actor_num=len(self.actor_wg.workers),
rollout_num=len(self.rollout_wg.workers),
)
)
ray.get(
self.rollout_wg.init_checkpoint_engine(
rank_offset=len(self.actor_wg.workers),
actor_num=len(self.actor_wg.workers),
rollout_num=len(self.rollout_wg.workers),
)
)
def sync_weights(self, version, validate=False, global_steps=0, use_trainer_do_validate=False):
"""Sync weights between trainer and rollouter, and update parameter version"""
start_time = time.time()
self.current_version = version
ray.get(self.rollouter.pause.remote())
print(f"[ParameterSynchronizer] rollout paused. cost {time.time() - start_time:.2f} seconds")
# Update MQ version
self.mq_client.update_param_version_sync(version)
pause_time = time.time()
# sync weights
# For sglang, always use sync_rollout_weights instead of sync_rollout_weights_by_checkpoint
# TODO use checkpoint engine for sglang rollout
# rollout_name = getattr(self.config.actor_rollout_ref.rollout, "name", None)
# use_checkpoint_engine = self.config.async_training.checkpoint_engine.enable and rollout_name != "sglang"
# if use_checkpoint_engine:
# self.actor_wg.sync_rollout_weights_by_checkpoint(self.sync_group_name)
# ray.get(self.rollout_wg.sync_rollout_weights_by_checkpoint(self.sync_group_name))
# else:
# self.actor_wg.sync_rollout_weights(self.sync_group_name)
# ray.get(self.rollout_wg.sync_rollout_weights(self.sync_group_name))
end_time = time.time()
print(
f"[ParameterSynchronizer] sync_weights success. cost {end_time - start_time:.2f} seconds, "
f"pause:{pause_time - start_time:.2f}s, sync:{end_time - pause_time:.2f}s"
)
# async train do validate
print(f"[ParameterSynchronizer] validate: {validate}, use_trainer_do_validate: {use_trainer_do_validate}")
if validate and use_trainer_do_validate:
print("[ParameterSynchronizer] use trainer to do validate")
self.validate_task = self.trainer._validate_process.remote()
else:
self.validate_task = None
# Async Update rollout version & validation
self.wait_last_update = self.rollouter.update_param_version.remote(
version, validate, global_steps, use_trainer_do_validate
)
self.wait_last_resume = self.rollouter.resume.remote(self.wait_last_update)
def wait_last_valid(self):
print("[ParameterSynchronizer] Waiting last sync and validate...")
start_time = time.time()
if self.wait_last_update:
ray.get(self.wait_last_update)
if self.wait_last_resume:
ray.get(self.wait_last_resume)
if self.validate_task:
ray.get(self.validate_task)
print(f"[ParameterSynchronizer] Wait last validate cost: {time.time() - start_time:.2f} seconds")
def rollouter_save_checkpoint(self, local_global_step_folder: str):
"""Trigger rollout to save checkpoint(dataloader)"""
print(f"[ParameterSynchronizer] Triggering checkpoint save at {local_global_step_folder} ...")
return ray.get(self.rollouter.save_checkpoint.remote(local_global_step_folder))
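# Illustrative driver-side call order (a sketch only; the actual wiring lives in the
# fully_async_policy trainer/rollouter code, and the variable names below are assumed):
#   synchronizer.sync_weights(version, validate=..., global_steps=..., use_trainer_do_validate=...)
#   ...trainer continues with the next update...
#   synchronizer.wait_last_valid()   # block until the async version bump / resume / validation finish
#   synchronizer.rollouter_save_checkpoint(local_global_step_folder)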
|
verl__experimental__fully_async_policy__param_sync.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from typing import Any, Optional
import ray
import torch
from ray.actor import ActorHandle
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode
from verl.workers.rollout.sglang_rollout.async_sglang_server import (
SGLangHttpServer,
SGLangReplica,
)
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
class SGLangHttpServerForPartial(SGLangHttpServer):
def __init__(
self,
config: RolloutConfig,
model_config: HFModelConfig,
rollout_mode: RolloutMode,
workers: list[ActorHandle],
replica_rank: int,
node_rank: int,
nnodes: int,
cuda_visible_devices: str,
base_gpu_id: int,
):
super().__init__(
config=config,
model_config=model_config,
rollout_mode=rollout_mode,
workers=workers,
replica_rank=replica_rank,
node_rank=node_rank,
nnodes=nnodes,
cuda_visible_devices=cuda_visible_devices,
base_gpu_id=base_gpu_id,
)
        # State for cancelling in-flight generation requests on this LLM server
self.paused = False
self.lock = asyncio.Lock()
self.cancel_event: dict[str, asyncio.Event] = {}
self.req_output: dict[str, Optional[dict[str, Any]]] = {}
async def _generate_step(
self,
prompt_ids: torch.Tensor,
sampling_params: dict[str, Any],
request_id: str,
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
) -> None:
sampling_params = dict(sampling_params)
max_new_tokens = min(
self.config.response_length,
self.config.max_model_len - len(prompt_ids) - 1,
)
sampling_params["max_new_tokens"] = max_new_tokens
sampling_params.setdefault(
"repetition_penalty",
self.config.get("repetition_penalty", 1.0),
)
sampling_params.pop("logprobs", None)
return_logprob = True
from sglang.srt.managers.io_struct import GenerateReqInput
if video_data is not None and len(video_data) > 0:
logger.warning(
f"Request {request_id} received video_data but it is not used. "
"This is to keep consistency with the implementation in "
"verl/workers/rollout/sglang_rollout/async_sglang_server.py. "
"Video data will be ignored."
)
request = GenerateReqInput(
rid=request_id,
input_ids=prompt_ids,
sampling_params=sampling_params,
return_logprob=return_logprob,
image_data=image_data,
# TODO: support video input for sglang
# video_data=video_data,
)
generator = self.tokenizer_manager.generate_request(request, None)
async for output in generator:
self.req_output[request_id] = output
assert self.req_output[request_id] is not None
async def generate_for_partial(
self,
prompt_ids: torch.Tensor,
sampling_params: dict[str, Any],
request_id: str,
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
) -> tuple[list[int], list[float], bool]:
async with self.lock:
if self.paused:
return [], [], True
self.req_output[request_id] = None
self.cancel_event[request_id] = asyncio.Event()
cancel_handle = asyncio.create_task(self.cancel_event[request_id].wait())
generation_handle = asyncio.create_task(
self._generate_step(prompt_ids, sampling_params, request_id, image_data, video_data)
)
done, pending = await asyncio.wait(
[generation_handle, cancel_handle],
return_when=asyncio.FIRST_COMPLETED,
)
for task in done:
await task
for task in pending:
task.cancel()
async with self.lock:
output = self.req_output.get(request_id)
if output is None:
self.cancel_event.pop(request_id, None)
self.req_output.pop(request_id, None)
return [], [], True
meta_info = output.get("meta_info", {})
output_token_logprobs = meta_info.get("output_token_logprobs")
token_ids: list[int] = []
log_probs: list[float] = []
if output_token_logprobs is not None:
for log_prob, token_id, _ in output_token_logprobs:
token_ids.append(int(token_id))
log_probs.append(float(log_prob))
else:
token_ids = list(output["output_ids"])
log_probs = []
is_cancel = generation_handle not in done
self.cancel_event.pop(request_id, None)
self.req_output.pop(request_id, None)
return token_ids, log_probs, is_cancel
async def cancel(self):
async with self.lock:
self.paused = True
for request_id in self.cancel_event:
self.cancel_event[request_id].set()
async def resume(self):
async with self.lock:
self.paused = False
class FullyAsyncSGLangReplica(SGLangReplica):
def __init__(
self,
replica_rank: int,
config: RolloutConfig,
model_config: HFModelConfig,
gpus_per_node: int = 8,
is_reward_model: bool = False,
):
super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
self.server_class = ray.remote(SGLangHttpServerForPartial)
async def cancel(self):
"""Cancel each rollout server."""
await asyncio.gather(*[server.cancel.remote() for server in self.servers])
async def resume(self):
"""Resume each rollout server."""
await asyncio.gather(*[server.resume.remote() for server in self.servers])
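# A minimal, runnable sketch (not part of verl) of the cancellation pattern used by
# generate_for_partial above: race the generation task against an asyncio.Event and report
# whether the cancel won. The names fake_generate/_demo_race_generation are hypothetical.
async def _demo_race_generation() -> None:
    async def fake_generate() -> list[int]:
        await asyncio.sleep(1.0)  # stands in for token-by-token generation
        return [1, 2, 3]

    cancel_event = asyncio.Event()
    generation_handle = asyncio.create_task(fake_generate())
    cancel_handle = asyncio.create_task(cancel_event.wait())
    # Fire the cancel shortly after starting, as cancel() does for in-flight requests.
    asyncio.get_running_loop().call_later(0.1, cancel_event.set)
    done, pending = await asyncio.wait({generation_handle, cancel_handle}, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    is_cancel = generation_handle not in done
    print(f"is_cancel={is_cancel}")  # True: the cancel event wins the race here


if __name__ == "__main__":
    asyncio.run(_demo_race_generation())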
|
verl__experimental__fully_async_policy__sglang_rollout__sglang_async_server.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from typing import Any, Optional, Sequence
import ray
from ray.actor import ActorHandle
from vllm import SamplingParams
from vllm.inputs import TokensPrompt
from vllm.outputs import RequestOutput
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode
from verl.workers.rollout.vllm_rollout.vllm_async_server import (
_qwen2_5_vl_dedup_image_tokens,
vLLMHttpServer,
vLLMReplica,
)
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
class vLLMHttpServerForPartial(vLLMHttpServer):
def __init__(
self,
config: RolloutConfig,
model_config: HFModelConfig,
rollout_mode: RolloutMode,
workers: list[ActorHandle],
replica_rank: int,
node_rank: int,
gpus_per_node: int,
nnodes: int,
cuda_visible_devices: str,
):
super().__init__(
config,
model_config,
rollout_mode,
workers,
replica_rank,
node_rank,
gpus_per_node,
nnodes,
cuda_visible_devices,
)
        # State for cancelling in-flight generation requests on this LLM server
self.paused = False
self.lock = asyncio.Lock()
self.cancel_event: dict[str, asyncio.Event] = {}
self.req_output: dict[str, Optional[RequestOutput]] = {}
async def _generate_step(
self,
prompt_ids: list[int],
sampling_params: dict[str, Any],
request_id: str,
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
):
max_tokens = self.config.max_model_len - len(prompt_ids)
sampling_params["logprobs"] = 1
sampling_params.setdefault("repetition_penalty", self.config.get("repetition_penalty", 1.0))
sampling_params = SamplingParams(max_tokens=max_tokens, **sampling_params)
prompt_ids = _qwen2_5_vl_dedup_image_tokens(prompt_ids, self.model_config.processor)
multi_modal_data = {}
if image_data is not None:
multi_modal_data["image"] = image_data
if video_data is not None:
multi_modal_data["video"] = video_data
prompt = TokensPrompt(prompt_token_ids=prompt_ids, multi_modal_data=multi_modal_data)
generator = self.engine.generate(prompt=prompt, sampling_params=sampling_params, request_id=request_id)
# Get final response
async for output in generator:
self.req_output[request_id] = output
assert self.req_output[request_id] is not None
async def generate_for_partial(
self,
prompt_ids: list[int],
sampling_params: dict[str, Any],
request_id: str,
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
    ) -> tuple[Sequence[int], list[float], bool]:
async with self.lock:
if self.paused:
# After cancel, all tasks will return directly and wait for the next submission
return [], [], True
            self.req_output[request_id] = None
self.cancel_event[request_id] = asyncio.Event()
cancel_handle = asyncio.create_task(self.cancel_event[request_id].wait())
generation_handle = asyncio.create_task(
self._generate_step(prompt_ids, sampling_params, request_id, image_data, video_data)
)
        done, pending = await asyncio.wait([generation_handle, cancel_handle], return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            await task
        for task in pending:
            task.cancel()
        async with self.lock:
            output = self.req_output.get(request_id)
            if output is None:
                self.cancel_event.pop(request_id, None)
                self.req_output.pop(request_id, None)
                return [], [], True
            token_ids = output.outputs[0].token_ids
            log_probs: list[float] = []
            for i, logprob_dict in enumerate(output.outputs[0].logprobs):
                # sampling_params sets logprobs=1, but vLLM can still return several candidates
                # per position; take the logprob of the token that was actually sampled.
                token_id = token_ids[i]
                log_probs.append(logprob_dict[token_id].logprob)
            is_cancel = generation_handle not in done
            self.cancel_event.pop(request_id, None)
            self.req_output.pop(request_id, None)
            return token_ids, log_probs, is_cancel
async def cancel(self):
async with self.lock:
self.paused = True
for request_id in self.cancel_event:
self.cancel_event[request_id].set()
async def resume(self):
async with self.lock:
self.paused = False
class FullyAsyncvLLMReplica(vLLMReplica):
def __init__(
self,
replica_rank: int,
config: RolloutConfig,
model_config: HFModelConfig,
gpus_per_node: int = 8,
is_reward_model: bool = False,
):
super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
self.server_class = ray.remote(vLLMHttpServerForPartial)
async def cancel(self):
"""Cancel each rollout server."""
await asyncio.gather(*[server.cancel.remote() for server in self.servers])
async def resume(self):
"""Resume each rollout server."""
await asyncio.gather(*[server.resume.remote() for server in self.servers])
|
verl__experimental__fully_async_policy__vllm_rollout__vllm_async_server.py
|
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import ray
from verl.experimental.agent_loop.agent_loop import AgentLoopManager
from verl.protocol import DataProto
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class OneStepOffAgentLoopManager(AgentLoopManager):
async def generate_sequences_async(self, prompts: DataProto) -> DataProto:
"""Split input batch and dispatch to agent loop workers (async version).
Args:
prompts (DataProto): Input batch.
Returns:
DataProto: Output batch.
"""
        chunks = prompts.chunk(len(self.agent_loop_workers))
        # Wrap the blocking ray.get in asyncio.to_thread so gathering the chunks does not block the event loop
        outputs = await asyncio.gather(
            *[
                asyncio.to_thread(ray.get, worker.generate_sequences.remote(chunk))
                for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True)
            ]
        )
output = DataProto.concat(outputs)
# calculate performance metrics
metrics = [output.meta_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]]
timing = self._performance_metrics(metrics, output)
output.meta_info = {"timing": timing, **outputs[0].meta_info}
return output
async def wake_up(self):
await asyncio.gather(*[replica.wake_up() for replica in self.rollout_replicas])
async def sleep(self):
await asyncio.gather(*[replica.sleep() for replica in self.rollout_replicas])
async def clear_kv_cache(self):
await asyncio.gather(*[replica.clear_kv_cache() for replica in self.rollout_replicas])
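# A minimal, runnable sketch (not part of verl) of the pattern used in generate_sequences_async:
# wrap a blocking call in asyncio.to_thread so several of them can be gathered without blocking
# the event loop. time.sleep stands in for the blocking ray.get; all names here are hypothetical.
def _blocking_call(i: int) -> int:
    import time

    time.sleep(0.1)  # simulates a blocking ray.get on a remote generate_sequences call
    return 2 * i


async def _demo_gather_blocking_calls() -> list[int]:
    return await asyncio.gather(*[asyncio.to_thread(_blocking_call, i) for i in range(4)])


if __name__ == "__main__":
    print(asyncio.run(_demo_gather_blocking_calls()))  # [0, 2, 4, 6]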
|
verl__experimental__one_step_off_policy__agent_loop__agent_loop.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import socket
from datetime import timedelta
import vllm
from torch.distributed import TCPStore
from vllm.distributed.utils import StatelessProcessGroup
from verl.utils.device import is_npu_available
@staticmethod
def create(
host: str,
port: int,
rank: int,
world_size: int,
data_expiration_seconds: int = 3600,
store_timeout: int = 300,
) -> "StatelessProcessGroup":
"""A replacement for `torch.distributed.init_process_group` that does not
pollute the global state.
If we have process A and process B called `torch.distributed.init_process_group`
to form a group, and then we want to form another group with process A, B, C,
D, it is not possible in PyTorch, because process A and process B have already
formed a group, and process C and process D cannot join that group. This
function is a workaround for this issue.
`torch.distributed.init_process_group` is a global call, while this function
is a stateless call. It will return a `StatelessProcessGroup` object that can be
used for exchanging metadata. With this function, process A and process B
can call `StatelessProcessGroup.create` to form a group, and then process A, B,
C, and D can call `StatelessProcessGroup.create` to form another group.
Args:
host: Host address (IPv4 or IPv6). For IPv6, can be in format like "::1" or "[::1]".
port: Port number to bind/listen on.
rank: Rank of the current process.
world_size: Total number of processes in the group.
data_expiration_seconds: Time in seconds before data entries expire (default: 3600).
store_timeout: Timeout in seconds for TCPStore connection (default: 300).
Returns:
StatelessProcessGroup: A stateless process group instance.
""" # noqa
# Detect address family (IPv4 or IPv6)
try:
# Try to parse as IPv6 first (IPv6 addresses are more specific)
ipaddress.IPv6Address(host.strip("[]"))
address_family = socket.AF_INET6
except (ipaddress.AddressValueError, ValueError):
address_family = socket.AF_INET
launch_server = rank == 0
if launch_server:
# listen on the specified interface (instead of 0.0.0.0 or ::)
listen_socket = socket.socket(address_family, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# For IPv6, set IPV6_V6ONLY to only listen on IPv6 (not dual-stack)
# This ensures consistent behavior across different systems
if address_family == socket.AF_INET6:
try:
listen_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
except (AttributeError, OSError):
# IPV6_V6ONLY might not be available on all systems
pass
# Remove brackets from IPv6 address if present (socket.bind handles it)
bind_host = host.strip("[]")
listen_socket.bind((bind_host, port))
listen_socket.listen()
listen_fd = listen_socket.fileno()
else:
listen_socket = None
listen_fd = None
store = TCPStore(
host_name=host,
port=port,
world_size=world_size,
is_master=launch_server,
timeout=timedelta(seconds=store_timeout),
use_libuv=False, # for now: github.com/pytorch/pytorch/pull/150215
master_listen_fd=listen_fd,
)
return StatelessProcessGroup(
rank=rank,
world_size=world_size,
store=store,
socket=listen_socket,
data_expiration_seconds=data_expiration_seconds,
)
vllm.distributed.utils.StatelessProcessGroup.create = create
def vllm_stateless_init_process_group(master_address, master_port, rank, world_size, device):
"""
vLLM provides `StatelessProcessGroup` to create a process group
without considering the global process group in torch.distributed.
It is recommended to create `StatelessProcessGroup`, and then initialize
the data-plane communication (NCCL) between external (train processes)
and vLLM workers.
"""
# NOTE: If it is necessary to support weight synchronization with the sglang backend in the future,
# the following can be used:
# from sglang.srt.distributed.device_communicators.pynccl import PyNcclCommunicator
# from sglang.srt.distributed.utils import statelessprocessgroup
if is_npu_available:
from vllm_ascend.distributed.device_communicators.pyhccl import (
PyHcclCommunicator as PyNcclCommunicator,
)
else:
from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
pg = StatelessProcessGroup.create(host=master_address, port=master_port, rank=rank, world_size=world_size)
pynccl = PyNcclCommunicator(pg, device=device)
return pynccl
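# A small, runnable sketch (not part of verl) of how callers lay out ranks when joining one
# stateless group across separate actor and rollout worker sets (see create_weight_sync_group
# in the fsdp/megatron workers). The worker counts below are hypothetical.
def _demo_weight_sync_rank_layout(num_actor_workers: int = 4, num_rollout_workers: int = 4) -> None:
    world_size = num_actor_workers + num_rollout_workers
    # Actor workers keep their local rank (rank_offset=0); rollout workers are shifted by the
    # number of actor workers, so every process gets a unique rank in [0, world_size).
    actor_ranks = list(range(num_actor_workers))
    rollout_ranks = [num_actor_workers + local_rank for local_rank in range(num_rollout_workers)]
    print(f"world_size={world_size}, actor_ranks={actor_ranks}, rollout_ranks={rollout_ranks}")
    # Each process would then call vllm_stateless_init_process_group(master_address, master_port,
    # its_rank, world_size, device) so that all of them land in the same stateless group.


if __name__ == "__main__":
    _demo_weight_sync_rank_layout()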
|
verl__experimental__one_step_off_policy__distributed_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
import torch.distributed
from omegaconf import DictConfig
from ray.util.collective import collective
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl.experimental.one_step_off_policy.distributed_utils import vllm_stateless_init_process_group
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
get_torch_device,
)
from verl.utils.fsdp_utils import (
fsdp_version,
load_fsdp_model_to_gpu,
offload_fsdp_model_to_cpu,
)
from verl.utils.ray_utils import get_event_loop
from verl.workers.fsdp_workers import (
ActorRolloutRefWorker,
AsyncActorRolloutRefWorker,
CriticWorker,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
__all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"]
class DetachSync(AsyncActorRolloutRefWorker):
def _get_actor_params(self):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def create_weight_sync_group(self, master_address, master_port, rank_offset, world_size):
rank = torch.distributed.get_rank() + rank_offset
self._weight_sync_group = vllm_stateless_init_process_group(
master_address,
master_port,
rank,
world_size,
get_torch_device().current_device(),
)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
if self._is_actor and self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
params = self._get_actor_params() if self._is_actor else None
rollout_name = self.config.rollout.name
if self._is_rollout:
if rollout_name == "vllm":
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
inference_model = self.rollout.inference_engine.worker.model_runner.model
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
loop = get_event_loop()
for key, shape, dtype in self._weights_info:
tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
if self._is_actor:
assert key in params
origin_data = params[key]
if hasattr(origin_data, "full_tensor"):
origin_data = origin_data.full_tensor()
if torch.distributed.get_rank() == 0:
tensor.copy_(origin_data)
if device_name == "npu":
self._weight_sync_group.broadcast(tensor, src=0, stream=get_torch_device().current_stream())
else:
collective.broadcast(tensor, src_rank=0, group_name="actor_rollout")
if self._is_rollout:
if rollout_name == "vllm":
inference_model.load_weights([(key, tensor)])
elif rollout_name == "sglang":
# first_rank_in_node = self._tp_rank % tp_size_per_node == 0,
# Only the first rank within each node (i.e., the local rank is 0) initializes the engine;
# engines for other ranks are set to None.
if inference_model is not None:
loop.run_until_complete(self.update_weights(inference_model, [(key, tensor)]))
if self._is_actor and self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
get_torch_device().empty_cache()
async def update_weights(self, inference_engine, params):
from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights
await sgl_update_weights(
engine=inference_engine,
params_batch=params,
device_mesh_key="infer_tp",
device_mesh=self.rollout_device_mesh,
)
if self.rollout_device_mesh["infer_tp"].get_local_rank() == 0:
await inference_engine.flush_cache()
class DetachActorWorker(DetachSync):
def _get_actor_params(self):
assert self._is_actor
params = self.actor_module_fsdp.state_dict()
from verl.utils.model import convert_weight_keys
params = convert_weight_keys(
params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
)
return params
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_actor_weights_info(self):
assert self._is_actor
if hasattr(self, "_weights_info"):
return self._weights_info
if fsdp_version(self.actor_module_fsdp) == 1:
from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType
FSDP.set_state_dict_type(
self.actor_module_fsdp,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig(),
)
params = self._get_actor_params()
ret = []
for key, tensor in params.items():
ret.append((key, tensor.size(), tensor.dtype))
self._weights_info = ret
return ret
class DetachAsyncRolloutWorker(DetachSync):
def __init__(self, config: DictConfig, role: str):
print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}")
ActorRolloutRefWorker.__init__(self, config, role)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def set_actor_weights_info(self, weights_info):
assert self._is_rollout
self._weights_info = weights_info
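# A small, runnable sketch (not part of verl) of the (key, shape, dtype) handshake used by
# get_actor_weights_info and sync_rollout_weights: the actor publishes only metadata, and the
# rollout side allocates empty buffers from it before the per-tensor broadcast fills them in.
# The tiny nn.Linear model below is hypothetical.
def _demo_weights_info_roundtrip() -> None:
    import torch.nn as nn

    model = nn.Linear(4, 2)
    weights_info = [(key, tensor.size(), tensor.dtype) for key, tensor in model.state_dict().items()]
    buffers = {key: torch.empty(shape, dtype=dtype) for key, shape, dtype in weights_info}
    assert set(buffers) == set(model.state_dict())
    print([(key, tuple(shape), str(dtype)) for key, shape, dtype in weights_info])


if __name__ == "__main__":
    _demo_weights_info_roundtrip()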
|
verl__experimental__one_step_off_policy__fsdp_workers.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that the main entry point is kept separate from ray_trainer, since ray_trainer is also used by other entry points.
"""
import asyncio
import os
import socket
import hydra
import ray
from verl.experimental.one_step_off_policy.ray_trainer import OneStepOffRayTrainer
from verl.experimental.one_step_off_policy.utils import need_critic
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role, need_reference_policy
from verl.utils.config import validate_config
from verl.utils.device import auto_set_device
def create_resource_pool_manager(config, roles: list) -> ResourcePoolManager:
"""
Create resource pool manager
Args:
config: Configuration object
roles: List of roles that need to create resource pools
Returns:
ResourcePoolManager: Resource pool manager
"""
resource_pool_spec = {}
mapping = {}
# Actor/Critic resource pool
if any(role in roles for role in [Role.Actor, Role.Critic, Role.RefPolicy, Role.RewardModel]):
assert config.trainer.n_gpus_per_node > 0, "config.trainer.n_gpus_per_node must be greater than 0"
assert config.trainer.nnodes > 0, "config.trainer.nnodes must be greater than 0"
trainer_pool = [config.trainer.n_gpus_per_node] * config.trainer.nnodes
resource_pool_spec["trainer_pool"] = trainer_pool
# Map training-related roles to the same resource pool
for role in [Role.Actor, Role.Critic, Role.RefPolicy, Role.RewardModel]:
if role in roles:
mapping[role] = "trainer_pool"
# Rollout resource pool
if Role.Rollout in roles:
assert config.rollout.n_gpus_per_node > 0, "config.rollout.n_gpus_per_node must be greater than 0"
assert config.rollout.nnodes > 0, "config.rollout.nnodes must be greater than 0"
rollout_pool = [config.rollout.n_gpus_per_node] * config.rollout.nnodes
resource_pool_spec["rollout_pool"] = rollout_pool
mapping[Role.Rollout] = "rollout_pool"
return ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
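# Worked example (hypothetical sizes): with trainer.nnodes=2, trainer.n_gpus_per_node=8,
# rollout.nnodes=1, rollout.n_gpus_per_node=8 and roles=[Role.Actor, Role.Critic, Role.Rollout],
# the function above produces
#   resource_pool_spec = {"trainer_pool": [8, 8], "rollout_pool": [8]}
#   mapping = {Role.Actor: "trainer_pool", Role.Critic: "trainer_pool", Role.Rollout: "rollout_pool"}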
def create_role_worker_mapping(config):
"""
Create mapping from roles to worker classes
Args:
config: Configuration object
Returns:
dict: Mapping from roles to worker classes
"""
# Select worker class based on strategy
use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
if use_legacy_worker_impl == "disable":
from verl.experimental.separation.engine_workers import (
DetachActorWorker,
DetachAsyncRolloutWorker,
TrainingWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
CriticWorker = TrainingWorker
else:
if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.experimental.one_step_off_policy.fsdp_workers import (
CriticWorker,
DetachActorWorker,
DetachAsyncRolloutWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
elif config.actor_rollout_ref.actor.strategy == "megatron":
assert config.critic.strategy == "megatron"
from verl.experimental.one_step_off_policy.megatron_workers import (
CriticWorker,
DetachActorWorker,
DetachAsyncRolloutWorker,
)
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
else:
raise NotImplementedError(f"Unsupported strategy: {config.actor_rollout_ref.actor.strategy}")
role_worker_mapping = {
Role.Actor: ray.remote(DetachActorWorker),
Role.Rollout: ray.remote(DetachAsyncRolloutWorker),
Role.Critic: ray.remote(CriticWorker),
}
# Add reference policy (if KL loss or reward is required)
if need_reference_policy(config):
role_worker_mapping[Role.RefPolicy] = ray.remote(DetachActorWorker)
return role_worker_mapping, ray_worker_group_cls
@ray.remote(num_cpus=10, max_concurrency=100) # please make sure main_task is not scheduled on head
class OneStepTaskRunner:
def run(self, config):
# Print the initial configuration. `resolve=True` will evaluate symbolic values.
from pprint import pprint
from omegaconf import OmegaConf
from verl.utils.fs import copy_to_local
print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}")
pprint(OmegaConf.to_container(config, resolve=True))
OmegaConf.resolve(config)
role_worker_mapping, ray_worker_group_cls = create_role_worker_mapping(config)
# validate config
validate_config(
config=config,
use_reference_policy=need_reference_policy(config),
use_critic=need_critic(config),
)
# Download the checkpoint from HDFS to the local machine.
# `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on
local_path = copy_to_local(
config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False)
)
# Instantiate the tokenizer and processor.
from verl.utils import hf_processor, hf_tokenizer
trust_remote_code = config.data.get("trust_remote_code", False)
tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
# Used for multimodal LLM, could be None
processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)
resource_pool_manager = create_resource_pool_manager(config, role_worker_mapping.keys())
from verl.utils.dataset.rl_dataset import collate_fn
# Create training and validation datasets.
train_dataset = create_rl_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("train_max_samples", -1),
)
val_dataset = create_rl_dataset(
config.data.val_files, config.data, tokenizer, processor, max_samples=config.data.get("val_max_samples", -1)
)
train_sampler = create_rl_sampler(config.data, train_dataset)
# Initialize the PPO trainer.
trainer = OneStepOffRayTrainer(
config=config,
tokenizer=tokenizer,
processor=processor,
role_worker_mapping=role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
train_dataset=train_dataset,
val_dataset=val_dataset,
collate_fn=collate_fn,
train_sampler=train_sampler,
device_name=config.trainer.device,
)
# Initialize the workers of the trainer.
trainer.init_workers()
# Start the training process.
asyncio.run(trainer.fit())
@hydra.main(config_path="config", config_name="one_step_off_ppo_trainer", version_base=None)
def main(config):
from time import time
from verl.trainer.main_ppo import run_ppo
start_time = time()
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
run_ppo(config, task_runner_class=OneStepTaskRunner)
print(f"total time: {time() - start_time:.2f} seconds")
if __name__ == "__main__":
main()
|
verl__experimental__one_step_off_policy__main_ppo.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
import torch.distributed
from omegaconf import DictConfig
from ray.util.collective import collective
from verl.experimental.one_step_off_policy.distributed_utils import vllm_stateless_init_process_group
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
get_torch_device,
)
from verl.utils.megatron_utils import load_megatron_model_to_gpu, offload_megatron_model_to_cpu
from verl.utils.ray_utils import get_event_loop
from verl.workers.megatron_workers import (
ActorRolloutRefWorker,
AsyncActorRolloutRefWorker,
CriticWorker,
)
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
__all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"]
class DetachSync(AsyncActorRolloutRefWorker):
def _get_actor_params(self):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def create_weight_sync_group(self, master_address, master_port, rank_offset, world_size):
rank = torch.distributed.get_rank() + rank_offset
self._weight_sync_group = vllm_stateless_init_process_group(
master_address,
master_port,
rank,
world_size,
get_torch_device().current_device(),
)
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self):
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
params_generator = self._get_actor_params_generator() if self._is_actor else None
if self._is_actor and self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module)
rollout_name = self.config.rollout.name
if self._is_rollout:
if rollout_name == "vllm":
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
inference_model = self.rollout.inference_engine.worker.model_runner.model
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
loop = get_event_loop()
for key, shape, dtype in self._weights_info:
if self._is_actor:
weight_key, weight = next(params_generator)
assert key == weight_key
assert shape == weight.size()
assert dtype == weight.dtype
tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
if self._is_actor and torch.distributed.get_rank() == 0:
tensor.copy_(weight)
if device_name == "npu":
self._weight_sync_group.broadcast(tensor, src=0, stream=get_torch_device().current_stream())
else:
collective.broadcast(tensor, src_rank=0, group_name="actor_rollout")
if self._is_rollout:
if rollout_name == "vllm":
inference_model.load_weights([(key, tensor)])
elif rollout_name == "sglang":
# first_rank_in_node = self._tp_rank % tp_size_per_node == 0,
# Only the first rank within each node (i.e., the local rank is 0) initializes the engine;
# engines for other ranks are set to None.
if inference_model is not None:
loop.run_until_complete(self.update_weights(inference_model, [(key, tensor)]))
if self._is_actor and self._is_offload_param:
offload_megatron_model_to_cpu(self.actor_module)
async def update_weights(self, inference_engine, params):
from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights
await sgl_update_weights(
engine=inference_engine,
params_batch=params,
device_mesh_key="infer_tp",
device_mesh=self.rollout_device_mesh,
)
if self.rollout_device_mesh["infer_tp"].get_local_rank() == 0:
await inference_engine.flush_cache()
class DetachActorWorker(DetachSync):
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def _get_actor_params_generator(self):
assert self._is_actor
from verl.models.mcore import get_mcore_weight_converter
from verl.utils.megatron_utils import per_tensor_generator
layer_name_mapping = {
"qkv_layer_name": "self_attention.linear_qkv.",
"gate_proj_layer_name": "linear_fc1.",
}
weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype)
generator = per_tensor_generator(
self.actor.actor_module,
self.actor_model_config,
weight_converter,
self.tf_config,
layer_name_mapping,
)
return generator
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_actor_weights_info(self):
assert self._is_actor
if hasattr(self, "_weights_info"):
return self._weights_info
if self._is_offload_param:
load_megatron_model_to_gpu(self.actor_module)
params_generator = self._get_actor_params_generator()
ret = []
for key, tensor in params_generator:
ret.append((key, tensor.size(), tensor.dtype))
self._weights_info = ret
# Here, we only call this function at the beginning,
# and immediately afterwards we call sync_rollout_weights.
# So we no longer call offload in this.
return ret
class DetachAsyncRolloutWorker(DetachSync):
def __init__(self, config: DictConfig, role: str):
print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}")
ActorRolloutRefWorker.__init__(self, config, role)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def set_actor_weights_info(self, weights_info):
assert self._is_rollout
self._weights_info = weights_info
|
verl__experimental__one_step_off_policy__megatron_workers.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This trainer supports model-agnostic model initialization with HuggingFace.
"""
import asyncio
import uuid
from pprint import pprint
from typing import Optional
import numpy as np
import ray
import torch
from omegaconf import OmegaConf
from ray.util.collective import collective
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from verl import DataProto
from verl.experimental.one_step_off_policy.utils import need_critic
from verl.experimental.separation.ray_trainer import SeparateRayPPOTrainer
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.ray_trainer import (
ResourcePoolManager,
compute_response_mask,
)
from verl.trainer.ppo.reward import extract_reward
from verl.trainer.ppo.utils import Role, WorkerType, need_reference_policy, need_reward_model
from verl.utils.debug import marked_timer
from verl.utils.rollout_skip import RolloutSkip
from verl.utils.tracking import ValidationGenerationsLogger
class OneStepOffRayTrainer(SeparateRayPPOTrainer):
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup,
processor=None,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
collate_fn=None,
train_sampler: Optional[Sampler] = None,
device_name=None,
):
"""
Initialize distributed PPO trainer with Ray backend.
Note that this trainer runs on the driver process on a single CPU/GPU node.
Args:
config: Configuration object containing training parameters.
tokenizer: Tokenizer used for encoding and decoding text.
role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes.
resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools.
ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup.
processor: Optional data processor, used for multimodal data
train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None.
val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None.
collate_fn: Function to collate data samples into batches.
train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None.
device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None.
"""
# Store the tokenizer for text processing
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
assert not self.hybrid_engine
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = need_reference_policy(self.config)
self.use_rm = need_reward_model(self.config)
self.use_critic = need_critic(self.config)
self.ray_worker_group_cls = ray_worker_group_cls
self.device_name = device_name if device_name else self.config.trainer.device
self.validation_generations_logger = ValidationGenerationsLogger(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
)
# if ref_in_actor is True, the reference policy will be actor without lora applied
lora_rank = config.actor_rollout_ref.model.get("lora", {}).get("rank", 0)
if lora_rank <= 0:
lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
self.ref_in_actor = lora_rank > 0 or config.actor_rollout_ref.model.get("lora_adapter_path") is not None
# define in-reward KL control
        # KL loss control is currently not supported
if self.config.algorithm.use_kl_in_reward:
self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl)
self.use_prefix_grouper = self.config.actor_rollout_ref.actor.get("use_prefix_grouper", False)
self.use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)
# ==================== SeparateRayPPOTrainer config ====================
self.global_steps = 0
self.epoch = 0
self.max_steps_duration = 0
self.progress_bar = None
self.logger = None
self.is_last_step = False
self.prev_step_profile = False
self.curr_step_profile = False
self.next_step_profile = False
self.last_val_metrics = {}
self.metrics = {}
self.timing_raw = {}
# reward message
self.future_reward = None
self.reward_tensor = None
self.reward_extra_infos_dict = {}
def _validate(self):
self.actor_rollout_wg = self.rollout_wg
ret = super()._validate()
self.actor_rollout_wg = self.actor_wg
return ret
def _create_actor_rollout_classes(self):
for role in [Role.Actor, Role.Rollout]:
resource_pool = self.resource_pool_manager.get_resource_pool(role)
role_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[role],
config=self.config.actor_rollout_ref,
role=str(role),
)
self.resource_pool_to_cls[resource_pool][str(role)] = role_cls
def _init_models(self):
if self.use_critic:
self.critic_wg = self.all_wg[str(Role.Critic)]
self.critic_wg.init_model()
if self.use_reference_policy and not self.ref_in_actor:
self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)]
self.ref_policy_wg.init_model()
self.rm_wg = None
if self.use_rm:
self.rm_wg = self.all_wg[str(Role.RewardModel)]
self.rm_wg.init_model()
self.actor_wg = self.all_wg[str(Role.Actor)]
self.rollout_wg = self.all_wg[str(Role.Rollout)]
self.actor_wg.init_model()
self.rollout_wg.init_model()
self.actor_rollout_wg = self.actor_wg
weights_info = self.actor_wg.get_actor_weights_info()[0]
self.rollout_wg.set_actor_weights_info(weights_info)
self._create_weight_sync_group()
def _init_async_rollout_manager(self):
# infrastructure overview: https://verl.readthedocs.io/en/latest/advance/reward_loop.html#architecture-design
# agent_reward_loop: streaming reward computation with actor rollout
# two conditions satisfied: (1) no reward model, or (2) reward model with extra resource pool
enable_agent_reward_loop = not self.use_rm or self.config.reward.reward_model.enable_resource_pool
# if enable_agent_reward_loop, we directly pass reward_loop_workers to agent loop manager
# to stream reward computation with actor rollout
reward_loop_worker_handles = self.reward_loop_manager.reward_loop_workers if enable_agent_reward_loop else None
# create async rollout manager and request scheduler
assert self.config.actor_rollout_ref.rollout.mode == "async"
from verl.experimental.one_step_off_policy.agent_loop import OneStepOffAgentLoopManager
self.async_rollout_mode = True
self.async_rollout_manager = OneStepOffAgentLoopManager(
config=self.config, worker_group=self.rollout_wg, reward_loop_worker_handles=reward_loop_worker_handles
)
def _create_weight_sync_group(self):
from verl.utils.device import get_nccl_backend
actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers
n_workers = len(actor_rollout_workers)
if self.device_name == "npu":
master_address = ray.get(self.actor_wg.workers[0]._get_node_ip.remote()).strip("[]")
master_port = ray.get(self.actor_wg.workers[0]._get_free_port.remote())
self.actor_wg.create_weight_sync_group(
master_address,
master_port,
0,
n_workers,
)
ray.get(
self.rollout_wg.create_weight_sync_group(
master_address,
master_port,
len(self.actor_wg.workers),
n_workers,
)
)
else:
# Create Ray collective group for fallback communication
collective.create_collective_group(
actor_rollout_workers,
n_workers,
list(range(0, n_workers)),
backend=get_nccl_backend(),
group_name="actor_rollout",
)
def sync_rollout_weights(self):
self.actor_wg.sync_rollout_weights()
ray.get(self.rollout_wg.sync_rollout_weights())
def _create_continuous_iterator(self):
"""
Create a continuous data iterator across epoch
"""
for epoch in range(self.config.trainer.total_epochs):
iterator = iter(self.train_dataloader)
for batch_dict in iterator:
yield epoch, batch_dict
async def _async_gen_next_batch(self, continuous_iterator):
"""
Call parameter synchronization and asynchronous sequence generation.
"""
try:
epoch, batch_dict = next(continuous_iterator)
except StopIteration:
return None
except Exception as e:
print(f"Error in async_gen_next_batch: {e}")
return None
metrics = {}
timing_raw = {}
# Create the initial batch from the data loader
batch = DataProto.from_single_dict(batch_dict)
# add uid to batch
batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object)
gen_batch = self._get_gen_batch(batch)
# pass global_steps to trace
gen_batch.meta_info["global_steps"] = self.global_steps
gen_batch_output = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
# async generation
with marked_timer("generate_async", timing_raw, color="purple"):
gen_batch_output = await self.async_rollout_manager.generate_sequences_async(gen_batch_output)
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = batch.union(gen_batch_output)
if "response_mask" not in batch.batch.keys():
batch.batch["response_mask"] = compute_response_mask(batch)
# Balance the number of valid tokens across DP ranks.
# NOTE: This usually changes the order of data in the `batch`,
# which won't affect the advantage calculation (since it's based on uid),
# but might affect the loss calculation (due to the change of mini-batching).
if self.config.trainer.balance_batch:
self._balance_batch(batch, metrics=metrics)
# compute global_valid tokens
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
# Launch individual reward computations as each generation completes
future_reward = None
# Return the original, now-modified `batch` and the `future_reward`
return metrics, timing_raw, epoch, batch, future_reward
@staticmethod
@ray.remote
def _launch_individual_rewards(batch, config, tokenizer):
reward_tensor, reward_extra_info = extract_reward(batch)
return reward_tensor, reward_extra_info
async def fit(self):
"""
The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
        to construct the PPO dataflow.
        The lightweight advantage computation is done on the driver process.
"""
from verl.utils.tracking import Tracking
self.logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint and update weights before doing anything
self._load_checkpoint()
self._fit_update_weights()
# perform validation before training
# currently, we only support validation using the reward_function.
if self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
self.logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
rollout_skip.wrap_generate_sequences()
# add tqdm
self.progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
self.last_val_metrics = None
self.max_steps_duration = 0
self.prev_step_profile = False
self.curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self.next_step_profile = False
# across epoch iterator
continuous_iterator = self._create_continuous_iterator()
# Start the first asynchronous generation task.
batch_data_future = asyncio.create_task(self._async_gen_next_batch(continuous_iterator))
while batch_data_future is not None:
batch_data_future = await self.fit_step(batch_data_future, continuous_iterator)
if self.is_last_step:
return
async def fit_step(self, batch_data_future, continuous_iterator):
"""
Single-step training template method. Handles all logic for one training step.
Flow:
        1. Pre-step processing -> 2. Get batch -> 3. Generate sequences ->
        4. Compute reward -> 5. Compute log_prob -> 6. Compute ref_log_prob ->
        7. Compute advantage -> 8. Update critic -> 9. Update actor -> 10. Post-step processing
        Args:
            batch_data_future: Awaitable resolving to the next asynchronously generated batch.
            continuous_iterator: Cross-epoch data iterator used to launch the following generation.
"""
self.metrics = {"training/global_step": self.global_steps, "training/epoch": self.epoch}
self.timing_raw = {}
# reward message
self.future_reward = None
self.reward_tensor = None
self.reward_extra_infos_dict = {}
self._fit_prepare_step()
self._fit_start_profile()
with marked_timer("step", self.timing_raw):
batch, batch_data_future = await self._fit_generate(batch_data_future, continuous_iterator)
# await asyncio.sleep(0) ensures:
# Asynchronous tasks can start executing immediately
# The event loop can handle other pending coroutines
# Prevents computations in a certain phase from blocking the entire asynchronous workflow
#
# The purpose here is to ensure that after triggering
# `self.async_rollout_manager.generate_sequences_async(gen_batch_output)`,
# the subsequent relevant logic can proceed in a timely manner
await asyncio.sleep(0)
batch = self._fit_compute_reward(batch)
await asyncio.sleep(0)
batch = self._fit_compute_log_prob(batch)
await asyncio.sleep(0)
batch = self._fit_compute_ref_log_prob(batch)
await asyncio.sleep(0)
batch = self._fit_compute_critic(batch)
await asyncio.sleep(0)
batch = self._fit_compute_advantage(batch)
await asyncio.sleep(0)
batch = self._fit_update_critic(batch)
await asyncio.sleep(0)
batch = self._fit_update_actor(batch)
await asyncio.sleep(0)
self._fit_update_weights()
await asyncio.sleep(0)
self._fit_dump_data(batch)
await asyncio.sleep(0)
self._fit_validate()
await asyncio.sleep(0)
self._fit_save_checkpoint()
await asyncio.sleep(0)
self._fit_stop_profile()
self._fit_collect_metrics(batch)
self._fit_torch_memory()
self._fit_experimental(batch)
self._fit_postprocess_step()
return batch_data_future
async def _fit_generate(self, batch_data_future, continuous_iterator):
metrics = self.metrics
timing_raw = self.timing_raw
with marked_timer("gen", timing_raw, color="red"):
_metrics, _timing_raw, epoch, batch, future_reward = await batch_data_future
timing_raw.update(batch.meta_info["timing"])
timing_raw.update(_timing_raw)
metrics.update(_metrics)
batch.meta_info.pop("timing", None)
# sync weights from actor to rollout
with marked_timer("sync_rollout_weights", timing_raw, color="purple"):
self._fit_update_weights()
await self.async_rollout_manager.clear_kv_cache()
# async next generation
if not self.is_last_step:
batch_data_future = asyncio.create_task(self._async_gen_next_batch(continuous_iterator))
await asyncio.sleep(0)
else:
batch_data_future = None
return batch, batch_data_future
def _fit_update_weights(self):
# TODO: use checkpoint engine to update weight
# self.sync_rollout_weights()
pass
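# A minimal, runnable sketch (not part of verl) of why fit_step sprinkles `await asyncio.sleep(0)`
# between phases: yielding one loop iteration lets tasks created earlier (e.g. the next
# asynchronous generation) actually start running. All names below are hypothetical.
async def _demo_sleep_zero_yield() -> None:
    started = []

    async def background() -> None:
        started.append(True)

    asyncio.create_task(background())
    assert not started  # the task is scheduled but has not run yet
    await asyncio.sleep(0)  # yield control for one event-loop iteration
    assert started  # now the background task has executed


if __name__ == "__main__":
    asyncio.run(_demo_sleep_zero_yield())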
|
verl__experimental__one_step_off_policy__ray_trainer.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import DictConfig
from verl.trainer.ppo.core_algos import AdvantageEstimator
def need_critic(config: DictConfig) -> bool:
"""Given a config, do we need critic"""
if config.algorithm.adv_estimator == AdvantageEstimator.GAE:
return True
elif config.algorithm.adv_estimator in [
AdvantageEstimator.GRPO,
AdvantageEstimator.GRPO_PASSK,
AdvantageEstimator.REINFORCE_PLUS_PLUS,
# AdvantageEstimator.REMAX, # TODO:REMAX advantage estimator is not yet supported in one_step_off_policy
AdvantageEstimator.RLOO,
AdvantageEstimator.OPO,
AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE,
AdvantageEstimator.GPG,
]:
return False
else:
raise NotImplementedError
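# Illustrative usage (not part of verl). This assumes, as elsewhere in verl, that
# AdvantageEstimator values compare equal to their lowercase string form (e.g. "gae", "grpo").
if __name__ == "__main__":
    from omegaconf import OmegaConf

    print(need_critic(OmegaConf.create({"algorithm": {"adv_estimator": "gae"}})))   # True: GAE trains a critic
    print(need_critic(OmegaConf.create({"algorithm": {"adv_estimator": "grpo"}})))  # False: group-based baseline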
|
verl__experimental__one_step_off_policy__utils.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import aiohttp
import numpy as np
import ray
import torch
from omegaconf import DictConfig, open_dict
from tensordict import TensorDict
from verl.protocol import DataProto
from verl.single_controller.ray.base import RayResourcePool
from verl.trainer.ppo.reward import load_reward_manager
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_to_local
from .reward_model import RewardModelManager
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def migrate_legacy_reward_impl(config):
"""
Migrate the legacy reward model implementation to the new one.
"""
# 1. reward workers migration
# config.reward_model.num_workers -> config.reward.num_workers
if config.reward_model.num_workers is not None:
config.reward.num_workers = config.reward_model.num_workers
# 2.reward manager migration
# config.reward_model.reward_manager -> config.reward.reward_manager
if config.reward_model.reward_manager is not None:
config.reward.reward_manager.name = config.reward_model.reward_manager
if config.reward_model.reward_loop_source is not None:
config.reward.reward_manager.source = config.reward_model.reward_loop_source
config.reward.reward_manager.module.path = config.reward_model.reward_loop_module_path
config.reward.reward_manager.module.name = config.reward_model.reward_loop_class_name
# 3. custom reward function migration
# config.custom_reward_function -> config.reward.custom_reward_function
if not all(v is None for v in config.custom_reward_function.values()):
config.reward.custom_reward_function = config.custom_reward_function
# 4. reward model migration
# config.reward_model -> config.reward.reward_model
for key in ["enable", "enable_resource_pool", "n_gpus_per_node", "nnodes"]:
if config.reward_model.get(key) is not None:
config.reward.reward_model[key] = config.reward_model[key]
if config.reward_model.model.path is not None:
config.reward.reward_model.model_path = config.reward_model.model.path
# config.reward_model.reward_kwargs -> config.reward.reward_kwargs (for dapo algo)
if config.reward_model.get("reward_kwargs") is not None:
with open_dict(config.reward):
config.reward["reward_kwargs"] = config.reward_model["reward_kwargs"]
# config.reward_model.rollout -> config.reward.reward_model.rollout
legacy_rollout = config.reward_model.rollout
for key in legacy_rollout.keys():
if legacy_rollout[key] is not None:
config.reward.reward_model.rollout[key] = legacy_rollout[key]
# 5. sandbox_fusion migration
# config.sandbox_fusion -> reward.sandbox_fusion
if not all(v is None for v in config.sandbox_fusion.values()):
config.reward.sandbox_fusion = config.sandbox_fusion
# 6. delete legacy config from configs
with open_dict(config):
del config.reward_model
del config.custom_reward_function
del config.sandbox_fusion
return config
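# --- Editor's note: an illustrative before/after of the migration above (field names follow the
# code; the concrete values are made up). A legacy config such as
#   reward_model.reward_manager = "naive"
#   reward_model.enable = True
#   reward_model.model.path = "/path/to/rm"
# is rewritten by migrate_legacy_reward_impl into
#   reward.reward_manager.name = "naive"
#   reward.reward_model.enable = True
#   reward.reward_model.model_path = "/path/to/rm"
# after which the legacy reward_model, custom_reward_function and sandbox_fusion sections are deleted.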
class RewardLoopWorker:
"""
    RewardLoopWorker handles reward computation:
    (1) rule-based reward computation
    (2) reward model-based reward computation (both disrm and genrm)
    (3) highly flexible user-customized reward functions (which can access the rm by posting requests to the reward model router)
    Reward Computation Logic:
    - if a user-customized reward function is provided:
        -> use the user-customized reward function directly
    - if no user-customized reward function is provided:
        -> rm is not enabled: use the default rule-based reward function
        -> rm is a disrm: compute the reward score using the disrm
        -> rm is a genrm: raise an error (a user-customized reward function must be provided)
"""
def __init__(self, config: DictConfig, reward_router_address: str = None):
"""
Args:
config: DictConfig, the config for reward loop worker.
reward_router_address: str, the address of reward router.
"""
self.config = config
self.reward_router_address = reward_router_address
self._init_reward_fn()
def _init_reward_fn(self):
input_tokenizer_local_path = copy_to_local(self.config.actor_rollout_ref.model.path)
self.input_tokenizer = hf_tokenizer(input_tokenizer_local_path, trust_remote_code=True)
self.reward_model_tokenizer = None
if self.config.reward.reward_model.enable:
reward_model_tokenizer_local_path = copy_to_local(self.config.reward.reward_model.model_path)
self.reward_model_tokenizer = hf_tokenizer(reward_model_tokenizer_local_path, trust_remote_code=True)
self.reward_manager = load_reward_manager(
self.config,
self.input_tokenizer,
reward_router_address=self.reward_router_address,
reward_model_tokenizer=self.reward_model_tokenizer,
)
async def compute_score_batch(self, data: DataProto) -> list[dict]:
tasks = []
for i in range(len(data)):
tasks.append(asyncio.create_task(self.compute_score(data[i : i + 1])))
outputs = await asyncio.gather(*tasks)
return outputs
async def compute_score(self, data: DataProto) -> dict:
assert len(data) == 1, "RewardLoopWorker only support single data item"
if self.config.reward.custom_reward_function.path is not None:
# directly use user-customized reward function
return await self.reward_manager.run_single(data)
else:
if self.config.reward.reward_model.enable:
# we assume the rm is disrm
# genrm must set custom_reward_function
return await self.compute_score_disrm(data)
else:
return await self.reward_manager.run_single(data)
async def _post_request(self, payload: dict, endpoint: str, max_retries: int = 16):
url = f"http://{self.reward_router_address}/{endpoint}"
last_exception = None
for attempt in range(max_retries):
try:
                # NOTE: total=None disables the overall client timeout; per-request failures and
                # hangs are handled by the retry loop and backoff below.
                timeout = aiohttp.ClientTimeout(total=None)
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(url, json=payload) as resp:
resp.raise_for_status()
return await resp.json()
except aiohttp.ClientResponseError as e:
# Do not retry on 4xx client errors, but retry on 5xx server errors.
if 400 <= e.status < 500:
logger.error(f"Request to {url} failed with client error HTTP {e.status}: {e}. Not retrying.")
raise
last_exception = e
logger.warning(
f"[Attempt {attempt + 1}/{max_retries}] Request to {url} failed with HTTP {e.status}: {e}. "
"Retrying..."
)
except (asyncio.TimeoutError, aiohttp.ClientConnectorError) as e:
last_exception = e
logger.warning(f"[Attempt {attempt + 1}/{max_retries}] Request to {url} failed: {e}. Retrying...")
except Exception as e:
last_exception = e
logger.warning(
f"[Attempt {attempt + 1}/{max_retries}] Request to {url} failed with unexpected error: {e}. "
"Retrying..."
)
if attempt < max_retries - 1:
# Using exponential backoff is generally better than a fixed sleep.
backoff_seconds = 2**attempt
await asyncio.sleep(min(backoff_seconds, 30))
logger.error(f"Max retries ({max_retries}) reached for request to {url}.")
if last_exception:
raise last_exception
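    # --- Editor's note: with the retry policy above (max_retries=16, backoff min(2**attempt, 30)),
    # the sleeps between attempts are 1, 2, 4, 8, 16 s and then 30 s for every remaining attempt,
    # i.e. roughly 31 + 10 * 30 = 331 seconds of worst-case backoff before the last exception is
    # re-raised.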
async def _preprocess_reward_inputs(self, data: DataProto) -> str:
assert len(data) == 1, "RewardLoopWorker only support single data item"
data_item = data[0]
assert "raw_prompt" in data_item.non_tensor_batch
# extract raw prompt
chat: list = list(data_item.non_tensor_batch["raw_prompt"])
# extract response
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
# decode
rollout_response = self.input_tokenizer.decode(valid_response_ids)
# remove bos and eos
rollout_response = rollout_response.replace(self.input_tokenizer.eos_token, "")
chat.append({"role": "assistant", "content": rollout_response})
rm_prompt = self.reward_model_tokenizer.apply_chat_template(
chat,
add_generation_prompt=False,
tokenize=False,
)
        # The Llama tokenizer adds a bos token by default when applying the chat template.
        # This workaround can be removed once vLLM >= 0.11.2 lets us pass add_special_tokens=False.
if self.reward_model_tokenizer.bos_token is not None and rm_prompt.startswith(
self.reward_model_tokenizer.bos_token
):
rm_prompt = rm_prompt[len(self.reward_model_tokenizer.bos_token) :]
return rm_prompt
async def compute_score_disrm(self, data: DataProto) -> dict:
disrm_prompt = await self._preprocess_reward_inputs(data)
engine_name = self.config.reward.reward_model.rollout.name
model_name = self.config.reward.reward_model.model_path
if engine_name == "vllm":
payloads = {
"model": model_name,
"input": disrm_prompt,
"use_activation": False,
}
output = await self._post_request(payloads, "classify")
rm_score = output["data"][-1]["probs"][-1]
elif engine_name == "sglang":
payloads = {
"model": model_name,
"input": disrm_prompt,
}
output = await self._post_request(payloads, "v1/embeddings")
rm_score = output["data"][-1]["embedding"][-1]
elif engine_name == "trtllm":
            # TODO: remove this guard once TRT-LLM switches to TorchSampler; the payload code below
            # is kept for that migration and is currently unreachable.
raise ValueError("TensorRT-LLM backend does not support reward models currently.")
payloads = {
"model": model_name,
"prompt": disrm_prompt,
"return_context_logits": True,
}
output = await self._post_request(payloads, "v1/completions")
rm_score = output["choices"][0]["context_logits"]
assert isinstance(rm_score, list) and len(rm_score) > 0, (
"TensorRT-LLM OpenAI server response for reward score is not in the expected format."
)
rm_score = float(rm_score[0][0])
logger.debug(f"rm score: {rm_score}")
else:
raise NotImplementedError(f"RewardLoopManager does not support {engine_name}")
return {"reward_score": rm_score}
class RewardLoopManager:
"""
    RewardLoopManager runs in the single controller.
    It creates the reward loop workers and manages them.
"""
def __init__(self, config: DictConfig, rm_resource_pool: RayResourcePool = None):
self.config = config
if self.config.reward.reward_model.enable:
self.reward_model_manager = RewardModelManager(config.reward.reward_model, rm_resource_pool)
self.reward_router_address = self.reward_model_manager.get_router_address()
else:
self.reward_model_manager = None
self.reward_router_address = None
self.reward_loop_workers_class = ray.remote(RewardLoopWorker)
self._init_reward_loop_workers()
def _init_reward_loop_workers(self):
self.reward_loop_workers = []
num_workers = self.config.reward.num_workers
node_ids = [node["NodeID"] for node in ray.nodes() if node["Alive"] and node["Resources"].get("CPU", 0) > 0]
for i in range(num_workers):
            # Round-robin scheduling over all alive nodes
node_id = node_ids[i % len(node_ids)]
self.reward_loop_workers.append(
self.reward_loop_workers_class.options(
name=f"reward_loop_worker_{i}",
scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
node_id=node_id,
soft=True,
),
).remote(self.config, self.reward_router_address)
)
def compute_rm_score(self, data: DataProto) -> DataProto:
if self.reward_model_manager is not None:
self.reward_model_manager.wake_up()
chunks = data.chunk(len(self.reward_loop_workers))
outputs = ray.get(
[
worker.compute_score_batch.remote(chunk)
for worker, chunk in zip(self.reward_loop_workers, chunks, strict=True)
]
)
outputs_flat = [item for sublist in outputs for item in sublist]
# compute rm score
scores = [item["reward_score"] for item in outputs_flat]
prompt_length = data.batch["prompts"].size(1)
valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(dim=1)
rm_scores = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
rm_scores[torch.arange(rm_scores.size(0)), valid_response_length - 1] = torch.tensor(
scores, dtype=torch.float32
)
batch = TensorDict({"rm_scores": rm_scores}, batch_size=len(data))
reward_extra_infos = [output.get("reward_extra_info", {}) for output in outputs_flat]
reward_extra_keys = list(reward_extra_infos[0].keys())
non_tensor_batch = {}
for key in reward_extra_keys:
non_tensor_batch[key] = np.array([info[key] for info in reward_extra_infos])
if self.reward_model_manager is not None:
self.reward_model_manager.sleep()
return DataProto(
batch=batch, non_tensor_batch=non_tensor_batch, meta_info={"reward_extra_keys": reward_extra_keys}
)
def _run_all(self, tasks: list[asyncio.Task]):
async def run_all():
return await asyncio.gather(*tasks)
return asyncio.run(run_all())
|
verl__experimental__reward_loop__reward_loop.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from abc import ABC, abstractmethod
from typing import Any, Callable
from omegaconf import DictConfig
from transformers import AutoTokenizer
from verl import DataProto
from verl.utils.ray_utils import get_event_loop
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
RawRewardFn = Callable[..., Any] | None
class RewardManagerBase(ABC):
_class_initialized = False
def __init__(self, config: DictConfig, tokenizer: AutoTokenizer, compute_score: RawRewardFn):
"""Initialize reward manager.
Args:
config (DictConfig): YAML config.
            tokenizer (AutoTokenizer): Tokenizer used to tokenize messages.
            compute_score (RawRewardFn): Optional reward scoring function; may be sync or async.
"""
self.config = config
self.tokenizer = tokenizer
self.compute_score = compute_score
self.loop = get_event_loop()
self.init_class(config, tokenizer)
@classmethod
def init_class(cls, config: DictConfig, tokenizer: AutoTokenizer):
"""Initialize class state shared across all instances."""
if cls._class_initialized:
return
cls._class_initialized = True
@abstractmethod
async def run_single(self, data: DataProto):
raise NotImplementedError
|
verl__experimental__reward_loop__reward_manager__base.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.reward_score import default_compute_score
@register("dapo")
class DAPORewardManager(RewardManagerBase):
"""DAPO Reward Manager."""
def __init__(self, config, tokenizer, compute_score, reward_router_address=None, reward_model_tokenizer=None):
super().__init__(config, tokenizer, compute_score)
self.compute_score = compute_score or default_compute_score
self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)
# DAPO Reward Config
overlong_buffer_cfg = config.reward.get("reward_kwargs", {}).get("overlong_buffer_cfg", None)
self.overlong_buffer_cfg = overlong_buffer_cfg
self.max_resp_len = config.reward.get("reward_kwargs", {}).get("max_resp_len", None)
self.reward_router_address = reward_router_address
self.reward_model_tokenizer = reward_model_tokenizer
if self.overlong_buffer_cfg is not None:
assert self.max_resp_len is not None, (
f"max_resp_len must be provided if {overlong_buffer_cfg=}, but got None"
)
assert self.max_resp_len >= self.overlong_buffer_cfg.len, (
"max_resp_len must be larger than overlong_buffer.len"
)
assert not self.overlong_buffer_cfg.enable or self.overlong_buffer_cfg.len > 0, (
"overlong_buffer.len must be positive when overlong penalty is enabled,"
f"but got {self.overlong_buffer_cfg.len}."
"To disable the overlong penalty, set overlong_buffer.enable = False"
)
async def run_single(self, data: DataProto) -> dict:
assert len(data) == 1, "Only support single data item"
data_item = data[0]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
data_source = data_item.non_tensor_batch["data_source"]
ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
extra_info = data_item.non_tensor_batch.get("extra_info", {})
response_str = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
)
extra_reward_kwargs = (
{
"reward_router_address": self.reward_router_address,
"reward_model_tokenizer": self.reward_model_tokenizer,
}
if self.reward_router_address is not None
else {}
)
if self.is_async_reward_score:
result = await self.compute_score(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
)
else:
result = await self.loop.run_in_executor(
None,
lambda: self.compute_score(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
),
)
reward_extra_info = {}
score: float
if isinstance(result, dict):
score = result["score"]
for key, value in result.items():
reward_extra_info[key] = value
else:
score = result
reward_extra_info["acc"] = score
reward = score
if self.overlong_buffer_cfg is not None and self.overlong_buffer_cfg.enable:
overlong_buffer_len = self.overlong_buffer_cfg.len
expected_len = self.max_resp_len - overlong_buffer_len
exceed_len = valid_response_length - expected_len
overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
reward += overlong_reward
if self.overlong_buffer_cfg.log:
reward_extra_info["overlong_reward"] = overlong_reward
reward_extra_info["overlong"] = overlong_reward < 0
return {"reward_score": reward, "reward_extra_info": reward_extra_info}
|
verl__experimental__reward_loop__reward_manager__dapo.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import logging
from omegaconf import DictConfig
from transformers import AutoTokenizer
from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register as register_manager
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.ray_utils import get_event_loop
from verl.utils.reward_score import default_compute_score
from verl.workers.reward_manager import register as register_manager_legacy
logger = logging.getLogger(__file__)
class AsyncTokenBucket:
"""Async token bucket for rate limiting with variable token consumption.
The token bucket algorithm is a classic rate limiting technique that allows
for burst traffic while maintaining an average rate limit. This implementation
is async-first and thread-safe, designed for use in concurrent environments.
The bucket starts full and refills at a constant rate (rate_limit tokens/second).
When tokens are acquired, they are consumed from the bucket. If insufficient
tokens are available, the acquire() method will sleep until enough tokens
have been refilled.
This implementation supports variable token consumption, making it suitable
for rate limiting based on request size (e.g., API token usage).
Args:
rate_limit (float): The rate at which tokens are added to the bucket,
in tokens per second. For example, rate_limit=10.0 means 10 tokens
are added per second (or 600 per minute).
max_tokens (float, optional): The maximum capacity of the token bucket.
Defaults to rate_limit if not specified. This value determines the
maximum burst size allowed.
Attributes:
rate_limit (float): Tokens added per second.
max_tokens (float): Maximum bucket capacity.
tokens (float): Current number of available tokens.
last_update (float | None): Timestamp of last token update (from event loop).
lock (asyncio.Lock): Async lock for thread-safe token operations.
Example:
>>> # Limit to 60 requests per minute (1 request per second)
>>> rpm_limiter = AsyncTokenBucket(rate_limit=1.0, max_tokens=1.0)
>>> await rpm_limiter.acquire(1.0) # Consumes 1 token
>>>
>>> # Limit to 10000 tokens per minute (~166.67 tokens per second)
>>> tpm_limiter = AsyncTokenBucket(rate_limit=166.67, max_tokens=166.67)
>>> await tpm_limiter.acquire(100.0) # Consumes 100 tokens
Thread Safety:
All operations are protected by an asyncio.Lock, making this class safe
for concurrent use across multiple coroutines.
Algorithm Details:
1. On each acquire(), calculate elapsed time since last update
2. Refill tokens: tokens += elapsed * rate_limit (capped at max_tokens)
3. If tokens >= num_tokens: consume tokens and return
4. Otherwise: calculate wait_time = tokens_needed / rate_limit, then sleep
5. Retry after sleep (loop back to step 1)
"""
def __init__(self, rate_limit: float, max_tokens: float = None):
self.rate_limit = rate_limit
self.max_tokens = max_tokens or rate_limit
self.tokens = self.max_tokens
self.last_update = None
self.lock = asyncio.Lock()
async def acquire(self, num_tokens: float = 1.0) -> None:
"""Acquire tokens from the bucket, waiting if necessary.
This method will block (using asyncio.sleep) until sufficient tokens
are available. It automatically refills tokens based on elapsed time
and the configured rate_limit.
For requests exceeding max_tokens, the method will wait for enough time
to accumulate the required tokens at the configured rate_limit, allowing
tokens to temporarily go negative.
Args:
num_tokens (float): Number of tokens to consume. Defaults to 1.0.
Can be fractional for fine-grained rate limiting.
Returns:
None: Returns when tokens have been successfully acquired.
Raises:
No exceptions are raised. This method will wait indefinitely until
tokens become available.
Example:
>>> bucket = AsyncTokenBucket(rate_limit=10.0)
>>> await bucket.acquire(5.0) # Acquire 5 tokens
>>> await bucket.acquire(1.0) # Acquire 1 more token
Implementation Notes:
- Uses event loop's time() for high-precision timestamps
- Lock is released during sleep to allow other coroutines to proceed
- Tokens are refilled continuously based on elapsed time
- For requests > max_tokens, allows temporary negative balance
"""
# Handle requests larger than max_tokens separately
if num_tokens > self.max_tokens:
wait_time = 0.0
async with self.lock:
loop = get_event_loop()
now = loop.time()
if self.last_update is None:
self.last_update = now
elapsed = now - self.last_update
new_tokens = elapsed * self.rate_limit
self.tokens = min(self.max_tokens, self.tokens + new_tokens)
tokens_needed = num_tokens - self.tokens
if tokens_needed > 0:
wait_time = tokens_needed / self.rate_limit
self.tokens -= num_tokens
self.last_update = now
if wait_time > 0:
await asyncio.sleep(wait_time)
return
# Standard case: request <= max_tokens
while True:
wait_time = 0.0
async with self.lock:
loop = get_event_loop()
now = loop.time()
if self.last_update is None:
self.last_update = now
elapsed = now - self.last_update
new_tokens = elapsed * self.rate_limit
self.tokens = min(self.max_tokens, self.tokens + new_tokens)
self.last_update = now
if self.tokens >= num_tokens:
self.tokens -= num_tokens
return
tokens_needed = num_tokens - self.tokens
wait_time = tokens_needed / self.rate_limit
if wait_time > 0:
await asyncio.sleep(wait_time)
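# --- Editor's note: a small, self-contained sketch of how AsyncTokenBucket behaves (kept as a
# comment so it does not run on import; the numbers are illustrative). With rate_limit=2.0 and a
# full bucket of 2 tokens, the first two acquire(1.0) calls return immediately and the third waits
# roughly 0.5 s for the bucket to refill.
#
#   async def _demo():
#       bucket = AsyncTokenBucket(rate_limit=2.0, max_tokens=2.0)
#       await bucket.acquire(1.0)  # immediate: the bucket starts full
#       await bucket.acquire(1.0)  # immediate: one token left
#       await bucket.acquire(1.0)  # sleeps ~0.5 s (1 token at 2 tokens/second)
#
#   asyncio.run(_demo())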
@register_manager("rate_limited")
@register_manager_legacy("rate_limited")
class RateLimitedRewardManager(RewardManagerBase):
"""Reward manager with rate limiting for API-based reward functions.
This manager implements a sophisticated three-layer rate limiting system
designed for LLM-as-judge scenarios where reward computation involves
external API calls (e.g., OpenAI, Anthropic, Claude) that have rate limits.
The three layers of rate limiting are:
1. **Concurrency limiting** (max_concurrent): Limits the number of
simultaneous API requests using asyncio.Semaphore. This prevents
overwhelming the API with too many parallel connections.
2. **Request rate limiting** (max_rpm): Limits requests per minute
using AsyncTokenBucket. Each request consumes 1 token. Useful for
APIs with per-minute request quotas.
3. **Token rate limiting** (max_tpm): Limits tokens per minute using
AsyncTokenBucket. Each request consumes estimated_tokens_per_request
tokens. Essential for APIs that bill or limit based on token usage
(e.g., GPT-4 API).
All rate limiters are **global class-level resources**, meaning they are
shared across all instances of this manager. This ensures that rate limits
are enforced consistently across multiple workers in distributed training.
Rate Limiting Flow:
When processing a reward request, the manager:
1. Acquires RPM token (if rpm_limiter enabled)
2. Acquires TPM tokens (if tpm_limiter enabled)
3. Acquires concurrency semaphore
4. Executes reward computation with timeout
5. Releases concurrency semaphore
6. Tokens are automatically refilled by the token buckets
Args:
        config (DictConfig): Configuration object containing reward settings:
- max_concurrent (int): Max parallel requests. Default: 1
- max_rpm (int | None): Max requests per minute. Default: None (unlimited)
- max_tpm (int | None): Max tokens per minute. Default: None (unlimited)
- estimated_tokens_per_request (int): Estimated tokens per request for
TPM limiting. Default: 2000
- timeout (float): Timeout for reward computation in seconds. Default: 300
tokenizer (AutoTokenizer): HuggingFace tokenizer for decoding responses.
compute_score (callable, optional): Custom reward scoring function. Can be
sync or async. Defaults to default_compute_score.
reward_router_address (str | None): Address for reward router service.
reward_model_tokenizer (AutoTokenizer | None): Optional tokenizer for reward model.
Class Attributes (Global State):
_semaphore (asyncio.Semaphore): Global concurrency limiter
_max_concurrent (int): Max concurrent requests
_rpm_limiter (AsyncTokenBucket | None): Request rate limiter
_max_rpm (int | None): Max requests per minute
_tpm_limiter (AsyncTokenBucket | None): Token rate limiter
_max_tpm (int | None): Max tokens per minute
_estimated_tokens_per_request (int): Estimated tokens per request
_class_initialized (bool): Whether class has been initialized
Example Configuration:
>>> config = DictConfig({
... "reward": {
... "max_concurrent": 10, # 10 parallel requests
... "max_rpm": 500, # 500 requests/minute
... "max_tpm": 100000, # 100k tokens/minute
... "estimated_tokens_per_request": 2000,
... "timeout": 60.0,
... }
... })
>>> manager = RateLimitedRewardManager(config, tokenizer)
Thread Safety:
This class is designed for concurrent use. All rate limiting resources
are protected by asyncio primitives (Lock, Semaphore).
See Also:
- AsyncTokenBucket: Token bucket implementation for rate limiting
- RewardManagerBase: Base class for reward managers
- verl.utils.reward_score.default_compute_score: Default scoring function
"""
# Class-level state for global rate limiting
_semaphore = None
_max_concurrent = None
_rpm_limiter = None
_max_rpm = None
_tpm_limiter = None
_max_tpm = None
_estimated_tokens_per_request = None
_class_initialized = False
@classmethod
def init_class(cls, config: DictConfig, tokenizer: AutoTokenizer):
"""Initialize class state shared across all instances."""
# Check if already initialized before calling parent.
#
# NOTE: This class owns a *global*, class-level set of rate limiters. Once the class has been
# initialized, subsequent instantiations cannot change the shared limiters. This is by design,
# but it can be surprising (and dangerous) when the first initialization happens with default
# values (often "unlimited") and later code tries to apply limits.
if cls._class_initialized:
rm_cfg = config.get("reward") or {}
incoming_max_rpm = rm_cfg.get("max_rpm", None)
incoming_max_tpm = rm_cfg.get("max_tpm", None)
# Warn when a caller is trying to change the global RPM/TPM limits after initialization.
# This commonly happens if the first instance was created without a config (legacy signature),
# which initializes the global limiters to their defaults and locks them in.
if (incoming_max_rpm != cls._max_rpm) or (incoming_max_tpm != cls._max_tpm):
if (
incoming_max_rpm is not None
or incoming_max_tpm is not None
or cls._max_rpm is not None
or cls._max_tpm is not None
):
logger.warning(
"RateLimitedRewardManager has already been initialized and its rate limiters are shared "
"globally across instances. The incoming (max_rpm/max_tpm) settings will be ignored. "
"This can lead to unexpected behavior (e.g., exceeding API rate limits) if the first "
"initialization used defaults (often unlimited). "
f"Existing: max_rpm={cls._max_rpm}, max_tpm={cls._max_tpm}. "
f"Incoming: max_rpm={incoming_max_rpm}, max_tpm={incoming_max_tpm}. "
"To apply different limits, ensure the first RateLimitedRewardManager created in this "
"process uses the desired configuration (or restart/reset the process)."
)
return
super().init_class(config, tokenizer)
rm_cfg = config.get("reward") or {}
# Concurrency limiter
cls._max_concurrent = rm_cfg.get("max_concurrent", 1)
cls._semaphore = asyncio.Semaphore(cls._max_concurrent)
# Request rate limiter (RPM)
cls._max_rpm = rm_cfg.get("max_rpm", None)
if cls._max_rpm is not None:
requests_per_second = cls._max_rpm / 60.0
cls._rpm_limiter = AsyncTokenBucket(rate_limit=requests_per_second, max_tokens=requests_per_second)
else:
cls._rpm_limiter = None
# Token rate limiter (TPM)
cls._max_tpm = rm_cfg.get("max_tpm", None)
cls._estimated_tokens_per_request = rm_cfg.get("estimated_tokens_per_request", 2000)
if cls._max_tpm is not None:
tokens_per_second = cls._max_tpm / 60.0
cls._tpm_limiter = AsyncTokenBucket(rate_limit=tokens_per_second, max_tokens=tokens_per_second)
else:
cls._tpm_limiter = None
log_msg = "Rate limiting configuration:\n"
log_msg += f" - Concurrency limit: {cls._max_concurrent}\n"
if cls._max_rpm is not None:
log_msg += f" - Request rate limit: {cls._max_rpm} RPM ({cls._max_rpm / 60.0:.2f} RPS)\n"
else:
log_msg += " - Request rate limit: unlimited\n"
if cls._max_tpm is not None:
log_msg += f" - Token rate limit: {cls._max_tpm} TPM ({cls._max_tpm / 60.0:.2f} TPS)\n"
log_msg += f" - Estimated tokens per request: {cls._estimated_tokens_per_request}\n"
else:
log_msg += " - Token rate limit: unlimited\n"
log_msg += "All limiters are shared globally across all workers."
logger.info(log_msg)
cls._class_initialized = True
def __init__(
self,
config,
tokenizer,
compute_score,
reward_router_address=None,
reward_model_tokenizer=None,
# Legacy (AbstractRewardManager) kwargs for compatibility. Not used.
num_examine: int | None = None,
reward_fn_key: str | None = None,
**kwargs,
):
# When called via the legacy AbstractRewardManager signature, `config` may be absent.
# In that case we fall back to an empty config so training can proceed.
if config is None:
config = DictConfig({"reward": {}})
if tokenizer is None:
raise TypeError("RateLimitedRewardManager requires `tokenizer`.")
super().__init__(config, tokenizer, compute_score)
self.compute_score = compute_score or default_compute_score
self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)
self.reward_router_address = reward_router_address
self.reward_model_tokenizer = reward_model_tokenizer
self.timeout = config.reward.get("timeout", 300.0)
async def _compute_reward(
self, data_source: str, solution_str: str, ground_truth: str, extra_info: dict
) -> dict | float:
extra_reward_kwargs = (
{
"reward_router_address": self.reward_router_address,
"reward_model_tokenizer": self.reward_model_tokenizer,
}
if self.reward_router_address is not None
else {}
)
if self.is_async_reward_score:
return await self.compute_score(
data_source=data_source,
solution_str=solution_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
)
else:
return await self.loop.run_in_executor(
None,
lambda: self.compute_score(
data_source=data_source,
solution_str=solution_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
),
)
async def run_single(self, data: DataProto) -> dict:
assert len(data) == 1, "Only support single data item"
data_item = data[0]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
data_source = data_item.non_tensor_batch["data_source"]
ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
extra_info = data_item.non_tensor_batch.get("extra_info", {})
tool_extra_fields = data_item.non_tensor_batch.get("tool_extra_fields", None)
if tool_extra_fields is not None:
extra_info.update(tool_extra_fields.items())
response_str = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
)
reward_extra_info = {}
# Apply rate limiting layers
if self._rpm_limiter is not None:
await self._rpm_limiter.acquire(1.0)
if self._tpm_limiter is not None:
estimated_tokens = self._estimated_tokens_per_request
await self._tpm_limiter.acquire(estimated_tokens)
async with self._semaphore:
try:
result = await asyncio.wait_for(
self._compute_reward(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
),
timeout=self.timeout,
)
score: float
if isinstance(result, dict):
score = result["score"]
for key, value in result.items():
reward_extra_info[key] = value
else:
score = result
reward_extra_info["acc"] = score
reward = score
except asyncio.TimeoutError:
logger.warning(
f"Reward computation timed out after {self.timeout}s for data_source={data_source}. "
f"Response preview: {response_str[:100]}..."
)
reward = 0.0
reward_extra_info["timeout"] = True
reward_extra_info["acc"] = 0.0
except Exception as e:
logger.error(
f"Reward computation failed for data_source={data_source}: {e}. "
f"Response preview: {response_str[:100]}..."
)
reward = 0.0
reward_extra_info["error"] = str(e)
reward_extra_info["acc"] = 0.0
return {"reward_score": reward, "reward_extra_info": reward_extra_info}
def __call__(self, data: DataProto, return_dict: bool = False):
"""Make the manager callable like traditional reward managers.
This method provides compatibility with the existing reward manager interface
by wrapping the async run_single method in a synchronous call.
Args:
data (DataProto): Input data containing prompts and responses.
return_dict (bool): If True, return a dict with reward_tensor and reward_extra_info.
If False, return only the reward_tensor. Defaults to False.
Returns:
torch.Tensor | dict: If return_dict is False, returns a tensor of shape [batch_size, response_length]
with rewards. If return_dict is True, returns a dict with:
- reward_tensor: The reward tensor
- reward_extra_info: Dict containing extra information about rewards
"""
from collections import defaultdict
import torch
# If there are pre-computed rm_scores, return them directly
if "rm_scores" in data.batch.keys():
if return_dict:
reward_extra_keys = data.meta_info.get("reward_extra_keys", [])
reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys}
return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info}
else:
return data.batch["rm_scores"]
# Initialize reward tensor
reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
reward_extra_info = defaultdict(list)
# Process each data item through the async event loop
async def process_batch():
tasks = []
for i in range(len(data)):
data_item = data[i : i + 1] # Get single item as DataProto slice
tasks.append(self.run_single(data_item))
results = await asyncio.gather(*tasks)
return results
# Run the async processing using self.loop property which lazily gets/creates event loop
# This ensures rate limiters and semaphores work correctly by using the same loop
results = self.loop.run_until_complete(process_batch())
# Aggregate results into reward tensor and extra info
for i, result in enumerate(results):
data_item = data[i]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
reward = result["reward_score"]
reward_tensor[i, valid_response_length - 1] = reward
# Collect extra info
if "reward_extra_info" in result:
for key, value in result["reward_extra_info"].items():
reward_extra_info[key].append(value)
if return_dict:
return {
"reward_tensor": reward_tensor,
"reward_extra_info": reward_extra_info,
}
else:
return reward_tensor
|
verl__experimental__reward_loop__reward_manager__limited.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.reward_score import default_compute_score
@register("naive")
class NaiveRewardManager(RewardManagerBase):
"""The reward manager."""
def __init__(self, config, tokenizer, compute_score, reward_router_address=None, reward_model_tokenizer=None):
super().__init__(config, tokenizer, compute_score)
self.compute_score = compute_score or default_compute_score
self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)
self.reward_router_address = reward_router_address
self.reward_model_tokenizer = reward_model_tokenizer
async def run_single(self, data: DataProto) -> dict:
assert len(data) == 1, "Only support single data item"
data_item = data[0]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
data_source = data_item.non_tensor_batch["data_source"]
ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
extra_info = data_item.non_tensor_batch.get("extra_info", {})
tool_extra_fields = data_item.non_tensor_batch.get("tool_extra_fields", None)
if tool_extra_fields is not None:
extra_info.update(tool_extra_fields.items())
num_turns = data_item.non_tensor_batch.get("__num_turns__", None)
rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
extra_info["num_turns"] = num_turns
extra_info["rollout_reward_scores"] = rollout_reward_scores
response_str = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
)
extra_reward_kwargs = (
{
"reward_router_address": self.reward_router_address,
"reward_model_tokenizer": self.reward_model_tokenizer,
}
if self.reward_router_address is not None
else {}
)
if self.is_async_reward_score:
result = await self.compute_score(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
)
else:
result = await self.loop.run_in_executor(
None,
lambda: self.compute_score(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
),
)
reward_extra_info = {}
score: float
if isinstance(result, dict):
score = result["score"]
for key, value in result.items():
reward_extra_info[key] = value
else:
score = result
reward_extra_info["acc"] = score
reward = score
return {"reward_score": reward, "reward_extra_info": reward_extra_info}
|
verl__experimental__reward_loop__reward_manager__naive.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
__all__ = ["register", "get_reward_manager_cls"]
REWARD_MANAGER: dict[str, type[RewardManagerBase]] = {}
def register(name: str) -> Callable[[type[RewardManagerBase]], type[RewardManagerBase]]:
"""Decorator to register a reward manager class with a given name.
Args:
name: `(str)`
The name of the reward manager.
"""
def decorator(cls: type[RewardManagerBase]) -> type[RewardManagerBase]:
if name in REWARD_MANAGER and REWARD_MANAGER[name] != cls:
raise ValueError(f"reward manager {name} has already been registered: {REWARD_MANAGER[name]} vs {cls}")
REWARD_MANAGER[name] = cls
return cls
return decorator
def get_reward_manager_cls(name: str) -> type[RewardManagerBase]:
"""Get the reward manager class with a given name.
Args:
name: `(str)`
The name of the reward manager.
Returns:
`(type)`: The reward manager class.
"""
if name not in REWARD_MANAGER:
raise ValueError(f"Unknown reward manager: {name}")
return REWARD_MANAGER[name]
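# --- Editor's note: a minimal usage sketch (kept as a comment; ``MyRewardManager`` is a
# hypothetical subclass, not part of verl). Registration happens at class-definition time via the
# decorator, and later lookups go through get_reward_manager_cls:
#
#   @register("my_manager")
#   class MyRewardManager(RewardManagerBase):
#       async def run_single(self, data):
#           return {"reward_score": 0.0, "reward_extra_info": {}}
#
#   assert get_reward_manager_cls("my_manager") is MyRewardManager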
|
verl__experimental__reward_loop__reward_manager__registry.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import ray
from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.reward_score import default_compute_score
@ray.remote(num_cpus=1)
class RewardComputeWorker:
"""
WARNING: This class cannot have async methods.
"""
def __init__(self, compute_score_fn):
# since the reward function may not be pickleable, we need to init it in the worker
self.compute_score_fn = compute_score_fn
def compute_score(self, **kwargs) -> dict:
return self.compute_score_fn(**kwargs)
@register("remote")
class RemoteRewardManager(RewardManagerBase):
"""
    The remote reward manager.
    Some reward functions misbehave when run in the default thread pool (e.g., math-verify); see
    https://github.com/volcengine/verl/issues/3407.
    To avoid these issues, we use separate processes to compute reward scores.
    Moreover, processes may be better suited to CPU-intensive workloads.
"""
def __init__(self, config, tokenizer, compute_score, reward_router_address=None, reward_model_tokenizer=None):
super().__init__(config, tokenizer, compute_score)
self.compute_score = compute_score or default_compute_score
self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)
        assert not self.is_async_reward_score, "Async reward score functions are not supported by the remote reward manager."
self.reward_router_address = reward_router_address
self.reward_model_tokenizer = reward_model_tokenizer
num_reward_workers = config.reward.num_workers
        # In the rollout & reward parallel mode, the total number of reward workers
        # will be agent_loop_workers * num_reward_workers.
self.reward_worker = [
            # schedule the reward worker on the same node as this manager
RewardComputeWorker.options(
scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
node_id=ray.get_runtime_context().get_node_id(),
soft=True,
),
).remote(self.compute_score)
for _ in range(num_reward_workers)
]
self.reward_worker_pool = itertools.cycle(self.reward_worker)
def choose_reward_worker(self):
return next(self.reward_worker_pool)
async def run_single(self, data: DataProto) -> dict:
assert len(data) == 1, "Only support single data item"
data_item = data[0]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
data_source = data_item.non_tensor_batch["data_source"]
ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
extra_info = data_item.non_tensor_batch.get("extra_info", {})
tool_extra_fields = data_item.non_tensor_batch.get("tool_extra_fields", None)
if tool_extra_fields is not None:
extra_info.update(tool_extra_fields.items())
num_turns = data_item.non_tensor_batch.get("__num_turns__", None)
rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
extra_info["num_turns"] = num_turns
extra_info["rollout_reward_scores"] = rollout_reward_scores
response_str = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
)
extra_reward_kwargs = (
{
"reward_router_address": self.reward_router_address,
"reward_model_tokenizer": self.reward_model_tokenizer,
}
if self.reward_router_address is not None
else {}
)
reward_worker = self.choose_reward_worker()
result = await reward_worker.compute_score.remote(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
)
reward_extra_info = {}
score: float
if isinstance(result, dict):
score = result["score"]
for key, value in result.items():
reward_extra_info[key] = value
else:
score = result
reward_extra_info["acc"] = score
reward = score
return {"reward_score": reward, "reward_extra_info": reward_extra_info}
|
verl__experimental__reward_loop__reward_manager__remote.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from verl.single_controller.ray.base import RayResourcePool, split_resource_pool
from verl.workers.config import HFModelConfig, RewardModelConfig
from verl.workers.rollout.replica import get_rollout_replica_class
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class RewardModelManager:
"""Reward model manager."""
def __init__(
self,
config: RewardModelConfig,
resource_pool: RayResourcePool = None,
):
"""
Initialize the reward model manager.
Args:
config (RewardModelConfig): Reward model configuration.
resource_pool (RayResourcePool, optional): Resource pool. Defaults to None.
"""
self.config = config
self.resource_pool = resource_pool
self._initialize_llm_servers()
self._initialize_router()
assert self.config.rollout.skip_tokenizer_init is False, "Reward model should not skip tokenizer init."
if self.config.rollout.free_cache_engine:
self.sleep()
def _initialize_llm_servers(self):
rollout_world_size = self.config.rollout.tensor_model_parallel_size
world_size = (
self.resource_pool.world_size
if self.resource_pool # colocate mode
else self.config.n_gpus_per_node * self.config.nnodes # standalone mode
)
num_replicas = world_size // rollout_world_size
rollout_replica_class = get_rollout_replica_class(self.config.rollout.name)
rollout_config = self.config.rollout
model_config = HFModelConfig(path=self.config.model_path)
self.tokenizer = model_config.get_processor()
self.rollout_replicas = [
rollout_replica_class(
replica_rank=replica_rank,
config=rollout_config,
model_config=model_config,
gpus_per_node=self.config.n_gpus_per_node,
is_reward_model=True,
)
for replica_rank in range(num_replicas)
]
if self.resource_pool:
split_resource_pools = split_resource_pool(self.resource_pool, split_size=rollout_world_size)
assert len(split_resource_pools) == len(self.rollout_replicas)
self._run_all(
[
server.init_colocated(resource_pool)
for server, resource_pool in zip(self.rollout_replicas, split_resource_pools, strict=True)
]
)
else:
self._run_all([server.init_standalone() for server in self.rollout_replicas])
self.server_handles = [server._server_handle for server in self.rollout_replicas]
self.server_addresses = [server._server_address for server in self.rollout_replicas]
def _initialize_router(self):
worker_urls = [f"http://{server_address}" for server_address in self.server_addresses]
# TODO (dyy): sglang router is not ready yet.
# if self.config.rollout.name == "sglang":
# from .router.inner_sglang_router import launch_router_process
# else:
# from .router.naive_router import launch_router_process
from .router.naive_router import launch_router_process
self.router_address, _ = launch_router_process(worker_urls=worker_urls)
def get_router_address(self):
return self.router_address
def wake_up(self):
"""Wake up all rollout replica instances."""
self._run_all([replica.wake_up() for replica in self.rollout_replicas])
def sleep(self):
"""Sleep all rollout replica instances."""
self._run_all([replica.sleep() for replica in self.rollout_replicas])
def _run_all(self, tasks: list[asyncio.Task]):
async def run_all():
await asyncio.gather(*tasks)
asyncio.run(run_all())
|
verl__experimental__reward_loop__reward_model.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import multiprocessing
import os
import time
import ray
import requests
from sglang_router.launch_server import RouterArgs, launch_router
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def launch_router_process(
worker_urls: list[str],
request_timeout: int = 180,
max_wait_time: int = 300,
timeout: int = 30,
) -> tuple[str, multiprocessing.Process]:
router_ip = ray.util.get_node_ip_address().strip("[]")
router_port, _ = get_free_port(router_ip)
router_address = (
f"[{router_ip}]:{router_port}" if is_valid_ipv6_address(router_ip) else f"{router_ip}:{router_port}"
)
router_args = RouterArgs(
host=router_ip,
port=router_port,
worker_urls=worker_urls,
balance_abs_threshold=0,
log_level="warn",
request_timeout_secs=request_timeout,
)
router_process = multiprocessing.Process(target=launch_router, args=(router_args,))
router_process.daemon = True
router_process.start()
time.sleep(3)
assert router_process.is_alive()
# health check
start_time = time.time()
url = f"http://{router_address}/health"
with requests.Session() as session:
while time.time() - start_time < max_wait_time:
try:
response = session.get(url, timeout=timeout)
if response.status_code == 200:
break
except requests.RequestException as e:
logger.debug(f"Health check failed: {e}")
time.sleep(2)
else:
router_process.terminate()
raise RuntimeError(f"Router health check failed after {max_wait_time} seconds.")
logger.info(f"Router is running on {router_address}")
return router_address, router_process
|
verl__experimental__reward_loop__router__inner_sglang_router.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import multiprocessing
import os
import time
from typing import Any
import aiohttp
import ray
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]:
if resp.status == 204 or (resp.content_length == 0):
return {}
try:
return await resp.json(content_type=None)
except Exception:
try:
text = await resp.text()
except Exception:
return {}
return {
"content_type": (resp.headers.get("Content-Type") or ""),
"text": text,
}
def launch_router_process(
worker_urls: list[str],
):
router_ip = ray.util.get_node_ip_address().strip("[]")
router_port, _ = get_free_port(router_ip)
router_address = (
f"[{router_ip}]:{router_port}" if is_valid_ipv6_address(router_ip) else f"{router_ip}:{router_port}"
)
router_process = multiprocessing.Process(
target=run_router,
args=(
router_ip,
router_port,
worker_urls,
),
)
router_process.daemon = True
router_process.start()
time.sleep(3)
assert router_process.is_alive()
logger.info(f"Router is running on {router_address}")
return router_address, router_process
def run_router(router_ip: str, router_port: int, worker_urls: list[str]):
router = NaiveRouter(worker_urls=worker_urls, verbose=False)
uvicorn.run(router.app, host=router_ip, port=router_port, log_level="warning")
class NaiveRouter:
def __init__(
self,
worker_urls: list[str],
max_connections: int = 1024,
timeout: int = 60,
max_attempts: int = 3,
retry_delay: float = 2.0,
verbose: bool = False,
) -> None:
"""A minimal async load-balancing router."""
self.verbose = verbose
self.app = FastAPI()
self.worker_urls = worker_urls
self.request_counts = {url: 0 for url in worker_urls}
self.max_connections = max_connections
self.timeout = timeout
self.max_attempts = max_attempts
self.retry_delay = retry_delay
# Register startup / shutdown hooks
self.app.on_event("startup")(self._on_startup)
self.app.on_event("shutdown")(self._on_shutdown)
# Catch-all proxy route
self.app.api_route("/{endpoint:path}", methods=["GET", "POST"])(self._make_async_request)
# Placeholder for aiohttp client
self.client = None
async def _on_startup(self):
"""Initialize aiohttp client safely inside the event loop"""
connector = aiohttp.TCPConnector(
limit=self.max_connections,
limit_per_host=self.max_connections // 4,
ttl_dns_cache=300,
use_dns_cache=True,
)
timeout = aiohttp.ClientTimeout(total=None)
self.client = aiohttp.ClientSession(connector=connector, timeout=timeout)
if self.verbose:
logger.info(f"[router] aiohttp client initialized with max_connections={self.max_connections}")
async def _on_shutdown(self):
"""Gracefully close aiohttp client"""
if self.client and not self.client.closed:
await self.client.close()
if self.verbose:
logger.info("[router] aiohttp client closed")
async def _make_async_request(self, request: Request, endpoint: str):
"""Proxy single request to a worker URL."""
if not self.worker_urls:
return JSONResponse(status_code=503, content={"error": "No available workers"})
worker_url = self._select_worker()
target_url = f"{worker_url}/{endpoint}"
if self.verbose:
logger.debug(f"[router] Forwarding request → {target_url}")
# Copy request data
body = await request.body()
headers = dict(request.headers)
for attempt in range(self.max_attempts):
# Send request to worker
try:
async with self.client.request(request.method, target_url, data=body, headers=headers) as response:
response.raise_for_status()
output = await _read_async_response(response)
self._release_worker(worker_url)
return output
except asyncio.TimeoutError:
logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})")
except aiohttp.ClientConnectorError:
logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
except aiohttp.ClientResponseError as e:
logger.error(f"HTTP error for {endpoint}: {e}")
raise
except Exception as e:
logger.error(f"Unexpected error for {endpoint}: {e}")
if attempt == self.max_attempts - 1:
raise
if attempt < self.max_attempts - 1:
await asyncio.sleep(self.retry_delay * (2**attempt))
raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts")
def _select_worker(self) -> str:
"""Select the least-loaded worker (simple round-robin by request count)."""
url = min(self.request_counts, key=self.request_counts.get)
self.request_counts[url] += 1
return url
def _release_worker(self, url: str) -> None:
"""Mark worker as free after request completes."""
self.request_counts[url] = max(0, self.request_counts[url] - 1)
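# --- Editor's note: a small illustration of the least-loaded selection above (counts are made up).
# With request_counts = {"http://a": 2, "http://b": 0, "http://c": 1}, _select_worker() returns
# "http://b" and bumps its count to 1; _release_worker("http://b") later decrements it again,
# never dropping below zero.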
|
verl__experimental__reward_loop__router__naive_router.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import torch
import torch.distributed
from omegaconf import DictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl.experimental.fully_async_policy.base_detach_sync import BaseDetachNcclSync
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
get_torch_device,
)
from verl.utils.fsdp_utils import fsdp_version
from verl.utils.megatron_utils import per_tensor_generator
from verl.workers.engine_workers import ActorRolloutRefWorker, TrainingWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
__all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "TrainingWorker"]
class DetachNcclSync(BaseDetachNcclSync, ActorRolloutRefWorker):
def __init__(self, config: DictConfig, role: str):
BaseDetachNcclSync.__init__(self, config, role)
ActorRolloutRefWorker.__init__(self, config, role)
def _get_actor_params(self):
pass
def load_model_to_gpu(self):
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
from verl.utils.fsdp_utils import load_fsdp_model_to_gpu
load_fsdp_model_to_gpu(self.actor_module_fsdp)
elif self.config.actor_rollout_ref.actor.strategy == "megatron":
from verl.utils.megatron_utils import load_megatron_model_to_gpu
load_megatron_model_to_gpu(self.actor_module, False)
else:
raise NotImplementedError(f"Unsupported strategy: {self.config.actor_rollout_ref.actor.strategy}")
def offload_model_to_cpu(self):
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
from verl.utils.fsdp_utils import offload_fsdp_model_to_cpu
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
elif self.config.actor_rollout_ref.actor.strategy == "megatron":
from verl.utils.megatron_utils import offload_megatron_model_to_cpu
offload_megatron_model_to_cpu(self.actor_module)
else:
raise NotImplementedError(f"Unsupported strategy: {self.config.actor_rollout_ref.actor.strategy}")
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self, sync_group_name="actor_rollout"):
# TODO: Refactor this function for the checkpoint engine
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
if self._is_actor and self.engine._is_offload_param:
self.load_model_to_gpu()
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
params = self._get_actor_params() if self._is_actor else None
elif self.config.actor_rollout_ref.actor.strategy == "megatron":
params_generator = self._get_actor_params_generator() if self._is_actor else None
params = {key: tensor for key, tensor in params_generator} if params_generator is not None else None
else:
raise NotImplementedError(f"Unsupported strategy: {self.config.actor_rollout_ref.actor.strategy}")
rollout_name = self.config.rollout.name
inference_model = None
if self._is_rollout and (not self._is_actor):
if rollout_name == "vllm":
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
inference_model = self.rollout._engine
# For ServerAdapter, _engine might be None and needs async initialization
if inference_model is None:
# Initialize the server adapter engine
print("[sync_rollout_weights] Initialize server adapter engine")
async def init_engine():
if hasattr(self.rollout, "_init_server_adapter"):
await self.rollout._init_server_adapter()
else:
print("[sync_rollout_weights] No _init_server_adapter method found")
return self.rollout._engine
inference_model = self._run_async_safely(init_engine())
if inference_model is None:
raise RuntimeError(
f"Failed to initialize rollout engine. "
f"rollout type: {type(self.rollout)}, "
f"has _init_server_adapter: {hasattr(self.rollout, '_init_server_adapter')}"
)
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
if rollout_name == "sglang" and self._is_rollout:
self._sync_sglang_weights(inference_model, params, sync_group_name)
else:
self._sync_vllm_weights(inference_model, params, sync_group_name)
if self._is_actor and self.engine._is_offload_param:
self.offload_model_to_cpu()
get_torch_device().empty_cache()
def cache_actor_weights_to_cpu(self):
# TODO: Refactor this function for the checkpoint engine
self.cpu_named_params = {}
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
if self._is_actor:
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
params = self._get_actor_params()
for tensor_idx, (key, _, _) in enumerate(self._weights_info):
origin_data = params[key]
if hasattr(origin_data, "full_tensor"):
origin_data = origin_data.full_tensor()
if tensor_idx % world_size == local_rank:
self.cpu_named_params[key] = origin_data.to("cpu", non_blocking=True)
elif self.config.actor_rollout_ref.actor.strategy == "megatron":
params_generator = self._get_actor_params_generator()
print(f"cache_actor_weights_to_cpu, local_rank:{local_rank}, world_size:{world_size}")
for tensor_idx, (key, tensor) in enumerate(params_generator):
if tensor_idx % world_size == local_rank:
self.cpu_named_params[key] = tensor.to("cpu", non_blocking=True)
else:
raise NotImplementedError(f"Unsupported strategy: {self.config.actor_rollout_ref.actor.strategy}")
get_torch_device().synchronize()
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights_by_checkpoint(self, sync_group_name="actor_rollout"):
# TODO: Refactor this function for the checkpoint engine
assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine
assert hasattr(self, "_weights_info") and self._weights_info is not None
# Load model to GPU
load_start_time = time.time()
if self._is_actor and self.engine._is_offload_param:
self.load_model_to_gpu()
load_duration = time.time() - load_start_time
from ray.util.collective import collective
# Cache actor weights to CPU and measure the time taken
cache_start_time = time.time()
self.cache_actor_weights_to_cpu()
cache_end_time = time.time()
cache_duration = cache_end_time - cache_start_time
# Register the cached weights into the checkpoint engine
self.checkpoint_engine.register_checkpoint(self._weights_info, self.cpu_named_params)
register_end_time = time.time()
register_duration = register_end_time - cache_end_time
self.cpu_named_params = {}
collective.barrier(group_name=sync_group_name)
update_start_time = time.time()
rollout_name = self.config.rollout.name
inference_model = None
if self._is_rollout and (not self._is_actor):
if rollout_name == "vllm":
inference_model = BaseDetachNcclSync.get_inference_model(self.rollout)
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
patch_vllm_moe_model_weight_loader(inference_model)
elif rollout_name == "sglang":
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
raise NotImplementedError(
"Fully async sglang backend does not support "
f"actor strategy: {self.config.actor_rollout_ref.actor.strategy}"
)
inference_model = self.rollout._engine
# For ServerAdapter, _engine might be None and needs async initialization
if inference_model is None:
# Initialize the server adapter engine
print("[sync_rollout_weights] Initialize server adapter engine")
async def init_engine():
if hasattr(self.rollout, "_init_server_adapter"):
await self.rollout._init_server_adapter()
else:
print("[sync_rollout_weights] No _init_server_adapter method found")
return self.rollout._engine
inference_model = self._run_async_safely(init_engine())
if inference_model is None:
raise RuntimeError(
f"Failed to initialize rollout engine. "
f"rollout type: {type(self.rollout)}, "
f"has _init_server_adapter: {hasattr(self.rollout, '_init_server_adapter')}"
)
else:
raise NotImplementedError(f"Unknown rollout name: {rollout_name}")
# Update the checkpoint with the inference model and broadcast weights
self.checkpoint_engine.update_checkpoint(
inference_model=inference_model,
group_name=sync_group_name,
overlap_broadcast_and_consume=self.config.checkpoint_engine.overlap_broadcast_and_consume,
)
update_end_time = time.time()
update_duration = update_end_time - update_start_time
if self.config.actor_rollout_ref.actor.strategy == "megatron":
collective.barrier(group_name=sync_group_name)
offload_start_time = time.time()
if self._is_actor and self.engine._is_offload_param:
self.offload_model_to_cpu()
offload_duration = time.time() - offload_start_time
print(
f"sync_rollout_weights_by_checkpoint finish!, rank:{torch.distributed.get_rank()},"
f" is_actor:{self._is_actor}, is_rollout:{self._is_rollout},"
f" total cost:{update_end_time - cache_start_time} seconds, while cache cost {cache_duration} seconds, "
f" register cost {register_duration} seconds, update cost {update_duration} seconds"
)
if self._is_actor and self.engine._is_offload_param:
print(
f"sync_rollout_weights_by_checkpoint load model to gpu cost {load_duration} seconds,"
f" offload model to cpu cost {offload_duration} seconds"
)
class DetachActorWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print("[DetachAsyncRolloutWorker] Initializing via DetachNcclSync...")
DetachNcclSync.__init__(self, config, role)
self._strategy_handlers = None
self.copy_handler, self.restore_handler = self._get_strategy_handlers()
def _get_actor_params(self):
# TODO: Refactor this function for the checkpoint engine
assert self._is_actor
params = self.actor_module_fsdp.state_dict()
from verl.utils.model import convert_weight_keys
params = convert_weight_keys(
params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp)
)
return params
def _get_actor_params_generator(self):
# TODO: Refactor this function for the checkpoint engine
assert self._is_actor
if self.bridge is not None:
generator = self.bridge.export_weights(self.actor.actor_module)
else:
generator = per_tensor_generator(
self.actor.actor_module,
self.actor_model_config,
self.weight_converter,
self.tf_config,
self.layer_name_mapping,
)
return generator
def _get_strategy_handlers(self):
if self._strategy_handlers is not None:
return self._strategy_handlers
strategy = self.config.actor_rollout_ref.actor.strategy
if strategy in ["fsdp", "fsdp2"]:
from verl.experimental.fully_async_policy.fsdp2_utils import (
fsdp2_sharded_load_from_cpu,
fsdp2_sharded_save_to_cpu,
)
self._strategy_handlers = (fsdp2_sharded_save_to_cpu, fsdp2_sharded_load_from_cpu)
elif strategy == "megatron":
from verl.experimental.fully_async_policy.megatron_utils import (
copy_megatron_model_to_cpu,
restore_megatron_model_from_cpu,
)
self._strategy_handlers = (copy_megatron_model_to_cpu, restore_megatron_model_from_cpu)
else:
raise NotImplementedError(f"Unsupported strategy: {strategy}")
return self._strategy_handlers
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_actor_weights_info(self):
# TODO: Refactor this function for the checkpoint engine
assert self._is_actor
if hasattr(self, "_weights_info"):
return self._weights_info
ret = []
if self.config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
if fsdp_version(self.actor_module_fsdp) == 1:
from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType
FSDP.set_state_dict_type(
self.actor_module_fsdp,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig(),
)
params = self._get_actor_params()
for key, tensor in params.items():
ret.append((key, tensor.size(), tensor.dtype))
elif self.config.actor_rollout_ref.actor.strategy == "megatron":
if self.engine._is_offload_param:
self.load_model_to_gpu()
params_generator = self._get_actor_params_generator()
for key, tensor in params_generator:
ret.append((key, tensor.size(), tensor.dtype))
else:
raise NotImplementedError(f"Unsupported strategy: {self.config.actor_rollout_ref.actor.strategy}")
self._weights_info = ret
return ret
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_model_to_cpu(self, n):
if not hasattr(self, "cpu_saved_models"):
self.cpu_saved_models = {}
self.cpu_saved_models[n] = self.copy_handler(self.actor.engine.module)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def restore_model_from_cpu(self, n):
if n in self.cpu_saved_models:
strategy = self.config.actor_rollout_ref.actor.strategy
if strategy in ["fsdp", "fsdp2"]:
cpu_sharded_state, global_spec = self.cpu_saved_models[n]
self.restore_handler(self.actor.engine.module, cpu_sharded_state, global_spec)
else:
self.restore_handler(self.actor.engine.module, self.cpu_saved_models[n])
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def clear_cpu_model(self, n):
if n in self.cpu_saved_models:
del self.cpu_saved_models[n]
class DetachAsyncRolloutWorker(DetachNcclSync):
def __init__(self, config: DictConfig, role: str):
print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}")
DetachNcclSync.__init__(self, config, role)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def set_actor_weights_info(self, weights_info):
assert self._is_rollout
self._weights_info = weights_info
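# Illustrative driver-side sketch (an assumption, not part of the original module):
# the call order implied by the methods above, using hypothetical worker-group
# handles `actor_wg` and `rollout_wg`. The dispatch/collection semantics of the
# single-controller framework (e.g. how ONE_TO_ALL results are gathered) are not
# modeled here.
def _example_weight_sync(actor_wg, rollout_wg, sync_group_name="actor_rollout"):
    # 1. The detached actor publishes (key, shape, dtype) metadata for its weights.
    weights_info = actor_wg.get_actor_weights_info()
    # 2. The detached rollout workers store the metadata so tensors can be received in order.
    rollout_wg.set_actor_weights_info(weights_info)
    # 3. Both sides join the collective weight sync for the shared group name.
    actor_wg.sync_rollout_weights(sync_group_name)
    rollout_wg.sync_rollout_weights(sync_group_name)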
|
verl__experimental__separation__engine_workers.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""
import uuid
from copy import deepcopy
from pprint import pprint
from typing import Any, Optional
import numpy as np
import torch
from omegaconf import OmegaConf
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from verl import DataProto
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup, ResourcePoolManager
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
compute_data_metrics,
compute_throughout_metrics,
compute_timing_metrics,
compute_variance_proxy_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer, apply_kl_penalty, compute_advantage, compute_response_mask
from verl.trainer.ppo.reward import extract_reward
from verl.trainer.ppo.utils import Role, WorkerType
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
from verl.utils.rollout_skip import RolloutSkip
class SeparateRayPPOTrainer(RayPPOTrainer):
"""
Support for the initialization and fit process of Ray Trainer in the resource-separated scenario:
- Fully async policy
- One-step off-policy
"""
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup,
processor=None,
reward_fn=None,
val_reward_fn=None,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
collate_fn=None,
train_sampler: Optional[Sampler] = None,
device_name=None,
):
super().__init__(
config,
tokenizer,
role_worker_mapping,
resource_pool_manager,
ray_worker_group_cls,
processor,
train_dataset,
val_dataset,
collate_fn,
train_sampler,
device_name,
)
self.global_steps = 0
self.epoch = 0
self.max_steps_duration = 0
self.progress_bar = None
self.logger = None
self.is_last_step = False
self.prev_step_profile = False
self.curr_step_profile = False
self.next_step_profile = False
self.last_val_metrics = {}
self.metrics = {}
self.timing_raw = {}
# reward message
self.reward_tensor = None
self.reward_extra_infos_dict = {}
def init_workers(self):
"""Initialize distributed training workers using Ray backend.
Creates:
1. Ray resource pools from configuration
2. Worker groups for each role (actor, critic, etc.)
"""
self._init_resource_pools()
self._create_worker_classes()
self._init_worker_groups()
self._init_models()
self._init_reward_loop()
self._init_async_rollout_manager()
self.checkpoint_manager = CheckpointEngineManager(
backend=self.config.actor_rollout_ref.rollout.checkpoint_engine.backend,
trainer=self.actor_rollout_wg,
replicas=self.async_rollout_manager.rollout_replicas,
)
def _init_resource_pools(self):
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
def _create_worker_classes(self):
self._create_actor_rollout_classes()
self._create_critic_class()
self._create_reference_policy_class()
self._create_reward_model_class()
def _create_actor_rollout_classes(self):
raise NotImplementedError
def _create_critic_class(self):
# create critic
if self.use_critic:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
critic_cfg = omega_conf_to_dataclass(self.config.critic)
if self.use_legacy_worker_impl == "disable":
# convert critic_cfg into TrainingWorkerConfig
from verl.workers.config import FSDPEngineConfig
from verl.workers.engine_workers import TrainingWorkerConfig
self.orig_critic_cfg = critic_cfg
if self.orig_critic_cfg.strategy == "fsdp":
engine_config: FSDPEngineConfig = self.orig_critic_cfg.model.fsdp_config
engine_config.infer_max_token_len_per_gpu = critic_cfg.ppo_infer_max_token_len_per_gpu
engine_config.max_token_len_per_gpu = critic_cfg.ppo_max_token_len_per_gpu
else:
raise NotImplementedError(f"Unknown strategy {self.orig_critic_cfg.strategy=}")
critic_cfg = TrainingWorkerConfig(
model_type="value_model",
model_config=self.orig_critic_cfg.model_config,
engine_config=engine_config,
optimizer_config=self.orig_critic_cfg.optim,
checkpoint_config=self.orig_critic_cfg.checkpoint,
)
critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg)
self.resource_pool_to_cls[resource_pool][str(Role.Critic)] = critic_cls
def _create_reference_policy_class(self):
# create reference policy if needed
if self.use_reference_policy:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
ref_policy_cls = RayClassWithInitArgs(
self.role_worker_mapping[Role.RefPolicy],
config=self.config.actor_rollout_ref,
role=str(Role.RefPolicy),
# profile_option=self.config.trainer.npu_profile.options,
)
self.resource_pool_to_cls[resource_pool][str(Role.RefPolicy)] = ref_policy_cls
def _create_reward_model_class(self):
# create a reward model if reward_fn is None
if self.use_rm:
# we create a RM here
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel)
rm_cls = RayClassWithInitArgs(
self.role_worker_mapping[Role.RewardModel], config=self.config.reward.reward_model
)
self.resource_pool_to_cls[resource_pool][str(Role.RewardModel)] = rm_cls
def _init_worker_groups(self):
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`.
# Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
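# A hedged illustration of that alternative (comments only, not executed here): each role
# gets its own resource pool and worker group, e.g.
#   actor_wg = self.ray_worker_group_cls(resource_pool=actor_pool, ray_cls_with_init=actor_cls, **wg_kwargs)
#   critic_wg = self.ray_worker_group_cls(resource_pool=critic_pool, ray_cls_with_init=critic_cls, **wg_kwargs)
# where `actor_pool` / `critic_pool` are distinct hypothetical pools, letting each role
# use a different parallel size.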
all_wg = {}
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
# Only require nsight worker options when tool is nsys
if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
assert (
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
is not None
), "worker_nsight_options must be set when using nsys with profile_steps"
wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
)
wg_kwargs["device_name"] = self.device_name
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(
resource_pool=resource_pool,
ray_cls_with_init=worker_dict_cls,
**wg_kwargs,
)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
self.all_wg = all_wg
def _init_models(self):
if self.use_critic:
self.critic_wg = self.all_wg[str(Role.Critic)]
if self.use_legacy_worker_impl == "disable":
self.critic_wg.reset()
# assign critic loss
from functools import partial
from verl.workers.utils.losses import value_loss
value_loss_ = partial(value_loss, config=self.orig_critic_cfg)
self.critic_wg.set_loss_fn(value_loss_)
else:
self.critic_wg.init_model()
if self.use_reference_policy and not self.ref_in_actor:
self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)]
self.ref_policy_wg.init_model()
if self.use_rm:
self.rm_wg = self.all_wg[str(Role.RewardModel)]
self.rm_wg.init_model()
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = self.all_wg[str(Role.ActorRollout)]
self.actor_rollout_wg.init_model()
def _init_reward_loop(self):
from verl.experimental.reward_loop import RewardLoopManager
# initialize reward loop manager
# reward model (colocate or standalone): get resource_pool
# no reward model: resource_pool = None
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) if self.use_rm else None
self.reward_loop_manager = RewardLoopManager(
config=self.config,
rm_resource_pool=resource_pool,
)
def _init_async_rollout_manager(self):
pass
def fit(self):
"""
The training loop of PPO.
The driver process only needs to call the compute functions of the worker group through RPC
to construct the PPO dataflow.
The light-weight advantage computation is done on the driver process.
!!!
The logic of fit is consistent with that of fit_refactor;
if any modifications are made, apply them to both methods simultaneously.
"""
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
self.logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint and update weights before doing anything
self._load_checkpoint()
self.checkpoint_manager.update_weights()
current_epoch = self.global_steps // len(self.train_dataloader)
# perform validation before training
# currently, we only support validation using the reward_function.
if self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
self.logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
rollout_skip.wrap_generate_sequences()
# add tqdm
self.progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
self.last_val_metrics = None
self.max_steps_duration = 0
self.prev_step_profile = False
self.curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self.next_step_profile = False
for epoch in range(current_epoch, self.config.trainer.total_epochs):
for batch_dict in self.train_dataloader:
self.epoch = epoch
self.fit_step(batch_dict)
if self.is_last_step:
return
def fit_step(self, batch_dict: Any = None):
"""
Single-step training template method. Handles all logic for one training step.
Flow:
1. Pre-step processing -> 2. Get batch -> 3. Generate sequences ->
4. Compute reward -> 5. Compute log_prob -> 6. Compute ref_log_prob / values ->
7. Compute advantage -> 8. Update critic -> 9. Update actor -> 10. Post-step processing
Args:
batch_dict: Raw data dictionary
"""
self.metrics = {"training/global_step": self.global_steps, "training/epoch": self.epoch}
self.timing_raw = {}
# reward message
self.reward_tensor = None
self.reward_extra_infos_dict = {}
self._fit_prepare_step()
self._fit_start_profile()
with marked_timer("step", self.timing_raw):
batch = self._fit_get_batch(batch_dict)
batch = self._fit_generate(batch)
batch = self._fit_compute_reward(batch)
batch = self._fit_compute_log_prob(batch)
batch = self._fit_compute_ref_log_prob(batch)
batch = self._fit_compute_critic(batch)
batch = self._fit_compute_advantage(batch)
batch = self._fit_update_critic(batch)
batch = self._fit_update_actor(batch)
self._fit_update_weights()
self._fit_dump_data(batch)
self._fit_validate()
self._fit_save_checkpoint()
self._fit_stop_profile()
self._fit_collect_metrics(batch)
self._fit_torch_memory()
self._fit_experimental(batch)
self._fit_postprocess_step()
def _fit_prepare_step(self):
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=False)
self.is_last_step = self.global_steps >= self.total_training_steps
def _fit_start_profile(self):
timing_raw = self.timing_raw
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not self.prev_step_profile and self.curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else self.curr_step_profile
)
def _fit_get_batch(self, batch_dict: dict) -> DataProto:
batch = DataProto.from_single_dict(batch_dict)
batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
# add uid
batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object)
return batch
def _fit_generate(self, batch: DataProto = None) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
gen_batch = self._get_gen_batch(batch)
# pass global_steps to trace
gen_batch.meta_info["global_steps"] = self.global_steps
gen_batch_output = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
with marked_timer("gen", timing_raw, color="red"):
if self.curr_step_profile:
self.async_rollout_manager.start_profile(global_step=self.global_steps)
gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output)
self.checkpoint_manager.sleep_replicas()
if self.curr_step_profile:
self.async_rollout_manager.stop_profile()
timing_raw.update(gen_batch_output.meta_info["timing"])
gen_batch_output.meta_info.pop("timing", None)
if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
with marked_timer("gen_max", timing_raw, color="purple"):
gen_baseline_batch = deepcopy(gen_batch)
gen_baseline_batch.meta_info["do_sample"] = False
if self.curr_step_profile:
self.async_rollout_manager.start_profile()
gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch)
self.checkpoint_manager.sleep_replicas()
if self.curr_step_profile:
self.async_rollout_manager.stop_profile()
batch = batch.union(gen_baseline_output)
# compute reward model score on batch
rm_scores = None
if self.use_rm and "rm_scores" not in batch.batch.keys():
batch_reward = self._compute_reward_colocate(batch)
batch = batch.union(batch_reward)
# Compute or extract reward for REMAX baseline
reward_baseline_tensor = batch.batch["rm_scores"].sum(dim=-1)
keys_to_pop = set(gen_baseline_output.batch.keys())
if rm_scores is not None:
keys_to_pop.update(rm_scores.batch.keys())
batch.pop(batch_keys=list(keys_to_pop))
batch.batch["reward_baselines"] = reward_baseline_tensor
del rm_scores, gen_baseline_batch, gen_baseline_output
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = batch.union(gen_batch_output)
if "response_mask" not in batch.batch.keys():
batch.batch["response_mask"] = compute_response_mask(batch)
# Balance the number of valid tokens across DP ranks.
# NOTE: This usually changes the order of data in the `batch`,
# which won't affect the advantage calculation (since it's based on uid),
# but might affect the loss calculation (due to the change of mini-batching).
if self.config.trainer.balance_batch:
self._balance_batch(batch, metrics=metrics)
# compute global_valid tokens
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
# get images_seqlens
images_seqlens_all = []
for multi_modal_input in batch.non_tensor_batch["multi_modal_inputs"]:
if "image_grid_thw" not in multi_modal_input.keys():
continue
images_seqlens_all.extend(multi_modal_input["images_seqlens"].tolist())
batch.meta_info["images_seqlens"] = images_seqlens_all
return batch
def _fit_compute_reward(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
if self.use_rm and "rm_scores" not in batch.batch.keys():
batch_reward = self._compute_reward_colocate(batch)
batch = batch.union(batch_reward)
# Compute or extract reward_tensor and reward_extra_infos_dict for training
reward_tensor, reward_extra_infos_dict = extract_reward(batch)
self.reward_tensor = reward_tensor
self.reward_extra_infos_dict = reward_extra_infos_dict
return batch
def _fit_compute_log_prob(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
# Operating Mode Selection:
# - Bypass mode: Sets old_log_probs = rollout_log_probs (2 policies: π_rollout, π_θ)
# - Decoupled mode: Recomputes old_log_probs as proximal anchor (3 policies: π_rollout, π_old, π_θ)
# Note: π_old is computed once per data batch and serves as a stable reference during mini-batch updates
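# A hedged illustration of the ratios implied above (comments only, not executed):
#   training ratio (PPO):    exp(log_prob_theta - old_log_probs)      # π_θ vs π_old
#   rollout correction (IS): exp(old_log_probs - rollout_log_probs)   # π_old vs π_rollout
# In bypass mode old_log_probs is set to rollout_log_probs, so the correction term
# collapses and only the training ratio remains.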
rollout_corr_config = self.config.algorithm.get("rollout_correction", None)
bypass_recomputing_logprobs = rollout_corr_config and rollout_corr_config.get("bypass_mode", False)
if bypass_recomputing_logprobs: # Use `rollout_log_probs`
from verl.trainer.ppo.rollout_corr_helper import apply_bypass_mode
apply_bypass_mode(
batch=batch,
rollout_corr_config=rollout_corr_config,
policy_loss_config=self.config.actor_rollout_ref.actor.policy_loss,
)
else: # Recompute old_log_probs
with marked_timer("old_log_prob", timing_raw, color="blue"):
old_log_prob, old_log_prob_mfu = self._compute_old_log_prob(batch)
entropys = old_log_prob.batch["entropys"]
response_masks = batch.batch["response_mask"]
actor_config = self.config.actor_rollout_ref.actor
entropy_agg = agg_loss(
loss_mat=entropys,
loss_mask=response_masks,
loss_agg_mode=actor_config.loss_agg_mode,
loss_scale_factor=actor_config.loss_scale_factor,
)
old_log_prob_metrics = {
"actor/entropy": entropy_agg.detach().item(),
"perf/mfu/actor_infer": old_log_prob_mfu,
}
metrics.update(old_log_prob_metrics)
old_log_prob.batch.pop("entropys")
if "routed_experts" in batch.batch and "routed_experts" in old_log_prob.batch:
router_mode = getattr(self.config.actor_rollout_ref.actor.router_replay, "mode", "disabled")
if router_mode == "R2":
batch.batch.pop("routed_experts")
else:
old_log_prob.batch.pop("routed_experts")
batch = batch.union(old_log_prob)
if "rollout_log_probs" in batch.batch.keys():
# TODO: we may want to add diff of probs too.
from verl.utils.debug.metrics import calculate_debug_metrics
metrics.update(calculate_debug_metrics(batch))
assert "old_log_probs" in batch.batch, f'"old_log_prob" not in {batch.batch.keys()=}'
return batch
def _fit_compute_ref_log_prob(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
if self.use_reference_policy:
with marked_timer(str(Role.RefPolicy), timing_raw, color="olive"):
ref_log_prob = self._compute_ref_log_prob(batch)
batch = batch.union(ref_log_prob)
return batch
def _fit_compute_critic(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
if self.use_critic:
with marked_timer("values", timing_raw, color="cyan"):
values = self._compute_values(batch)
batch = batch.union(values)
return batch
def _fit_compute_advantage(self, batch) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
reward_tensor = self.reward_tensor
reward_extra_infos_dict = self.reward_extra_infos_dict
with marked_timer("adv", timing_raw, color="brown"):
# we combine with rule-based rm
reward_extra_infos_dict: dict[str, list]
batch.batch["token_level_scores"] = reward_tensor
if reward_extra_infos_dict:
batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()})
# compute rewards. apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward:
batch, kl_metrics = apply_kl_penalty(
batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
)
metrics.update(kl_metrics)
else:
batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]
# Compute rollout correction: IS weights, rejection sampling, and metrics
# Only runs in decoupled mode (computes once per batch using stable π_old)
# In bypass mode, this is skipped - actor computes metrics from evolving π_θ vs π_rollout
rollout_corr_config = self.config.algorithm.get("rollout_correction", None)
bypass_recomputing_logprobs = rollout_corr_config and rollout_corr_config.get("bypass_mode", False)
if (
rollout_corr_config is not None
and "rollout_log_probs" in batch.batch
and not bypass_recomputing_logprobs # Only in decoupled mode
):
from verl.trainer.ppo.rollout_corr_helper import compute_rollout_correction_and_add_to_batch
# Compute IS weights, apply rejection sampling, compute metrics
batch, is_metrics = compute_rollout_correction_and_add_to_batch(batch, rollout_corr_config)
# IS and off-policy metrics already have rollout_corr/ prefix
metrics.update(is_metrics)
# compute advantages, executed on the driver process
norm_adv_by_std_in_grpo = self.config.algorithm.get(
"norm_adv_by_std_in_grpo", True
) # GRPO adv normalization factor
batch = compute_advantage(
batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n,
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
config=self.config.algorithm,
)
return batch
def _fit_update_critic(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
if self.use_critic:
with marked_timer("update_critic", timing_raw, color="pink"):
critic_output = self._update_critic(batch)
critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
metrics.update(critic_output_metrics)
return batch
def _fit_update_actor(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with marked_timer("update_actor", timing_raw, color="red"):
actor_output = self._update_actor(batch)
actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
metrics.update(actor_output_metrics)
return batch
def _fit_update_weights(self):
timing_raw = self.timing_raw
if self.config.trainer.critic_warmup <= self.global_steps:
# update weights from trainer to rollout
with marked_timer("update_weights", timing_raw, color="red"):
self.checkpoint_manager.update_weights()
def _fit_dump_data(self, batch: DataProto):
timing_raw = self.timing_raw
reward_extra_infos_dict = self.reward_extra_infos_dict
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir)
def _fit_validate(self):
metrics = self.metrics
timing_raw = self.timing_raw
if self.config.trainer.test_freq > 0 and (
self.is_last_step or self.global_steps % self.config.trainer.test_freq == 0
):
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if self.is_last_step:
self.last_val_metrics = val_metrics
metrics.update(val_metrics)
def _fit_save_checkpoint(self):
timing_raw = self.timing_raw
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
self.is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
# sleep replicas to avoid OOM during checkpoint saving
# self.checkpoint_manager.sleep_replicas()
self._save_checkpoint()
# wake replicas again after the checkpoint has been saved
# TODO: Check whether this is needed in the separation setting.
# self.checkpoint_manager.update_weights()
def _fit_stop_profile(self):
timing_raw = self.timing_raw
with marked_timer("stop_profile", timing_raw):
self.next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
self.curr_step_profile and not self.next_step_profile
if self.config.global_profiler.profile_continuous_steps
else self.curr_step_profile
)
self.prev_step_profile = self.curr_step_profile
self.curr_step_profile = self.next_step_profile
def _fit_collect_metrics(self, batch):
metrics = self.metrics
timing_raw = self.timing_raw
# collect metrics
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: implement actual TFLOPs and theoretical TFLOPs
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
# compute variance proxy metrics
gradient_norm = metrics.get("actor/grad_norm", None)
metrics.update(compute_variance_proxy_metrics(batch=batch, gradient_norm=gradient_norm))
def _fit_torch_memory(self):
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
def _fit_experimental(self, batch):
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
self.train_dataloader.sampler.update(batch=batch)
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
self.train_dataset.on_batch_end(batch=batch)
def _fit_postprocess_step(self):
metrics = self.metrics
timing_raw = self.timing_raw
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# TODO: make a canonical logger that supports various backends
self.logger.log(data=metrics, step=self.global_steps)
self.progress_bar.update(1)
self.global_steps += 1
if self.is_last_step:
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=True)
pprint(f"Final validation metrics: {self.last_val_metrics}")
self.progress_bar.close()
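# Illustrative subclass sketch (an assumption, not part of the original module):
# SeparateRayPPOTrainer leaves _create_actor_rollout_classes abstract, so a concrete
# resource-separated recipe is expected to register its actor/rollout worker class
# there. The wiring below follows the pattern of _create_critic_class and
# _create_reference_policy_class above; the exact config/role arguments are hypothetical.
class _ExampleSeparateTrainer(SeparateRayPPOTrainer):
    def _create_actor_rollout_classes(self):
        resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
        actor_rollout_cls = RayClassWithInitArgs(
            cls=self.role_worker_mapping[Role.ActorRollout],
            config=self.config.actor_rollout_ref,
            role=str(Role.ActorRollout),
        )
        self.resource_pool_to_cls[resource_pool][str(Role.ActorRollout)] = actor_rollout_cls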
|
verl__experimental__separation__ray_trainer.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import numpy as np
import ray
from transfer_queue import BatchMeta
import verl.experimental.agent_loop.agent_loop as agent_loop
class AgentLoopManager(agent_loop.AgentLoopManager):
def generate_sequences(self, prompts: BatchMeta) -> BatchMeta:
"""Split input batch and dispatch to agent loop workers.
Args:
prompts (BatchMeta): Input batch.
Returns:
BatchMeta: Output batch metadata.
"""
chunks = prompts.chunk(len(self.agent_loop_workers))
outputs = ray.get(
[
worker.generate_sequences.remote(chunk)
for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True)
]
)
output = BatchMeta.concat(outputs)
# calculate performance metrics
metrics = [output.extra_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]]
timing = self._performance_metrics(metrics, output)
output.set_extra_info("timing", timing)
return output
def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: BatchMeta) -> dict[str, float]:
timing = {}
t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk])
t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk])
timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min()
timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max()
timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean()
timing["agent_loop/tool_calls/min"] = t_tool_calls.min()
timing["agent_loop/tool_calls/max"] = t_tool_calls.max()
timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean()
# TODO (TQ): initialize tq during init once the TQ enable switch is stable
tq_client = self._create_transferqueue_client()
# batch sequence generation is bounded by the slowest sample
slowest = np.argmax(t_generate_sequences + t_tool_calls)
attention_mask = asyncio.run(tq_client.async_get_data(output[slowest]))["attention_mask"]
prompt_length = output.samples[0].fields["prompts"].shape[0]
timing["agent_loop/slowest/generate_sequences"] = t_generate_sequences[slowest]
timing["agent_loop/slowest/tool_calls"] = t_tool_calls[slowest]
timing["agent_loop/slowest/prompt_length"] = attention_mask[:prompt_length].sum().item()
timing["agent_loop/slowest/response_length"] = attention_mask[prompt_length:].sum().item()
return timing
def create_transferqueue_client_for_workers(self):
# TODO (TQ): initialize tq during worker init once the TQ enable switch is stable
ray.get([worker.create_transferqueue_client.remote() for worker in self.agent_loop_workers])
def _create_transferqueue_client(self):
"""Create a client for data system (TransferQueue)."""
from verl.single_controller.ray.base import get_random_string
from verl.utils.transferqueue_utils import create_transferqueue_client
client_name = get_random_string(length=6)
tq_client = create_transferqueue_client(
client_id=f"AgentLoopManager_{client_name}",
config=self.config.transfer_queue,
)
return tq_client
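# Illustrative usage sketch (comments only, an assumption): driver code would typically
# hand a BatchMeta of prompts to the manager and read the timing metrics back, e.g.
#   output_meta = agent_loop_manager.generate_sequences(prompts_meta)
#   timing = output_meta.extra_info["timing"]
# where `agent_loop_manager` / `prompts_meta` are hypothetical handles; the constructor
# arguments of AgentLoopManager come from the parent class and are not shown here.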
|
verl__experimental__transfer_queue__agent_loop.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine the main with ray_trainer, as ray_trainer is used by other mains.
"""
import os
import socket
import hydra
import ray
from omegaconf import OmegaConf
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.main_ppo import TaskRunner as MainTaskRunner
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
from verl.trainer.ppo.utils import need_critic, need_reference_policy
from verl.utils.config import validate_config
from verl.utils.device import auto_set_device, is_cuda_available
from .ray_trainer import RayPPOTrainer
@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
"""Main entry point for PPO training with Hydra configuration management.
Args:
config: Hydra configuration dictionary containing training parameters.
"""
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
run_ppo(config)
# Define a function to run the PPO-like training process
def run_ppo(config, task_runner_class=None) -> None:
"""Initialize Ray cluster and run distributed PPO training process.
Args:
config: Training configuration object containing all necessary parameters
for distributed PPO training including Ray initialization settings,
model paths, and training hyperparameters.
task_runner_class: Allows a recipe to supply its own TaskRunner.
"""
# Check if Ray is not initialized
if not ray.is_initialized():
# Initialize Ray with a local cluster configuration
# Set environment variables in the runtime environment to control tokenizer parallelism,
# NCCL debug level, VLLM logging level, and allow runtime LoRA updating
# `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration
default_runtime_env = get_ppo_ray_runtime_env()
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
if config.transfer_queue.enable:
# Add runtime environment variables for transfer queue
runtime_env_vars = runtime_env_kwargs.get("env_vars", {})
runtime_env_vars["TRANSFER_QUEUE_ENABLE"] = "1"
runtime_env_kwargs["env_vars"] = runtime_env_vars
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
print(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
if task_runner_class is None:
task_runner_class = ray.remote(num_cpus=1)(TaskRunner) # please make sure main_task is not scheduled on head
# Create a remote instance of the TaskRunner class, and
# Execute the `run` method of the TaskRunner instance remotely and wait for it to complete
if (
is_cuda_available
and config.global_profiler.tool == "nsys"
and config.global_profiler.get("steps") is not None
and len(config.global_profiler.get("steps", [])) > 0
):
from verl.utils.import_utils import is_nvtx_available
assert is_nvtx_available(), "nvtx is not available in CUDA platform. Please 'pip3 install nvtx'"
nsight_options = OmegaConf.to_container(
config.global_profiler.global_tool_config.nsys.controller_nsight_options
)
runner = task_runner_class.options(runtime_env={"nsight": nsight_options}).remote()
else:
runner = task_runner_class.remote()
ray.get(runner.run.remote(config))
# [Optional] get the path of the timeline trace file from the configuration, default to None
# This file is used for performance analysis
timeline_json_file = config.ray_kwargs.get("timeline_json_file", None)
if timeline_json_file:
ray.timeline(filename=timeline_json_file)
class TaskRunner(MainTaskRunner):
def run(self, config):
"""Execute the main PPO training workflow.
This method sets up the distributed training environment, initializes
workers, datasets, and reward functions, then starts the training process.
Args:
config: Training configuration object containing all parameters needed
for setting up and running the PPO training process.
"""
# Print the initial configuration. `resolve=True` will evaluate symbolic values.
from pprint import pprint
from verl.utils.fs import copy_to_local
print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}")
pprint(OmegaConf.to_container(config, resolve=True))
OmegaConf.resolve(config)
actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config)
self.add_critic_worker(config)
# We should adopt a multi-source reward function here:
# - for rule-based rm, we directly call a reward score
# - for model-based rm, we call a model
# - for code related prompt, we send to a sandbox if there are test cases
# finally, we combine all the rewards together
# The reward type depends on the tag of the data
self.add_reward_model_resource_pool(config)
# Add a reference policy worker if KL loss or KL reward is used.
self.add_ref_policy_worker(config, actor_rollout_cls)
# validate config
validate_config(
config=config,
use_reference_policy=need_reference_policy(config),
use_critic=need_critic(config),
)
# Download the checkpoint from HDFS to the local machine.
# `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on
local_path = copy_to_local(
config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False)
)
# Instantiate the tokenizer and processor.
from verl.utils import hf_processor, hf_tokenizer
trust_remote_code = config.data.get("trust_remote_code", False)
tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
# Used for multimodal LLM, could be None
processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)
resource_pool_manager = self.init_resource_pool_mgr(config)
from verl.utils.dataset.rl_dataset import collate_fn
# Create training and validation datasets.
train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor, is_train=True)
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor, is_train=False)
train_sampler = create_rl_sampler(config.data, train_dataset)
# Initialize the PPO trainer.
trainer = RayPPOTrainer(
config=config,
tokenizer=tokenizer,
processor=processor,
role_worker_mapping=self.role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
train_dataset=train_dataset,
val_dataset=val_dataset,
collate_fn=collate_fn,
train_sampler=train_sampler,
)
# Initialize the workers of the trainer.
trainer.init_workers()
# Start the training process.
trainer.fit()
if __name__ == "__main__":
main()
|
verl__experimental__transfer_queue__main_ppo.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""
import json
import logging
import math
import os
import uuid
from collections import defaultdict
from pprint import pprint
from typing import Any, Optional
import numpy as np
import tensordict
import torch
from omegaconf import OmegaConf, open_dict
from packaging.version import parse as parse_version
from tensordict import TensorDict
from torch.utils.data import Dataset, Sampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from transfer_queue import (
BatchMeta,
SimpleStorageUnit,
TransferQueueController,
get_placement_group,
process_zmq_server_info,
)
from verl import DataProto
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup, ResourcePoolManager
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.config import AlgoConfig
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
compute_data_metrics,
compute_throughout_metrics,
compute_timing_metrics,
process_validation_metrics,
)
from verl.trainer.ppo.utils import Role, WorkerType, need_critic, need_reference_policy, need_reward_model
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, should_save_ckpt_esi
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
from verl.utils.rollout_skip import RolloutSkip
from verl.utils.seqlen_balancing import calculate_workload, get_seqlen_balanced_partitions, log_seqlen_unbalance
from verl.utils.torch_functional import masked_mean
from verl.utils.tracking import ValidationGenerationsLogger
from verl.utils.transferqueue_utils import create_transferqueue_client, get_transferqueue_client, tqbridge
@tqbridge(put_data=False)
def compute_reward_decorated(data):
reward_tensor = data.batch["rm_scores"]
reward_extra_keys = data.meta_info.get("reward_extra_keys", [])
reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys}
return reward_tensor, reward_extra_info
@tqbridge(put_data=False)
def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"):
"""Apply KL penalty to the token-level rewards.
This function computes the KL divergence between the reference policy and current policy,
then applies a penalty to the token-level rewards based on this divergence.
Args:
data (DataProto): The data containing batched model outputs and inputs.
kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty.
kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl".
Returns:
tuple: A tuple containing:
- The updated data with token-level rewards adjusted by KL penalty
- A dictionary of metrics related to the KL penalty
"""
response_mask = data.batch["response_mask"]
token_level_scores = data.batch["token_level_scores"]
batch_size = data.batch.batch_size[0]
# compute kl between ref_policy and current policy
# When apply_kl_penalty, algorithm.use_kl_in_reward=True, so the reference model has been enabled.
kld = core_algos.kl_penalty(
data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty
) # (batch_size, response_length)
kld = kld * response_mask
beta = kl_ctrl.value
token_level_rewards = token_level_scores - beta * kld
current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence
current_kl = torch.mean(current_kl, dim=0).item()
# according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837
kl_ctrl.update(current_kl=current_kl, n_steps=batch_size)
metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta}
return token_level_rewards, metrics
def compute_response_mask(batch_meta: BatchMeta, tq_client):
"""Compute the attention mask for the response part of the sequence.
This function extracts the portion of the attention mask that corresponds to the model's response,
which is used for masking computations that should only apply to response tokens.
Args:
        batch_meta (BatchMeta): The data containing batched model outputs and inputs.
        tq_client: TransferQueue client used to fetch the batch and store the result.
    Returns:
        BatchMeta: Metadata for the stored ``response_mask`` field.
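
    Example:
        A minimal sketch (illustrative values only) of the slicing performed here:

        >>> import torch
        >>> attention_mask = torch.tensor([[1, 1, 1, 1, 0]])  # prompt (3 tokens) + response (2 tokens, last padded)
        >>> response_length = 2
        >>> attention_mask[:, -response_length:]
        tensor([[1, 0]])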
"""
data = tq_client.get_data(batch_meta)
responses = data["responses"]
response_length = responses.size(1)
attention_mask = data["attention_mask"]
response_mask = attention_mask[:, -response_length:]
output = TensorDict({"response_mask": response_mask}, batch_size=response_mask.size(0))
batch_meta = tq_client.put(data=output, metadata=batch_meta)
return batch_meta
@tqbridge(put_data=False)
def compute_advantage(
data: DataProto,
adv_estimator: AdvantageEstimator,
gamma: float = 1.0,
lam: float = 1.0,
num_repeat: int = 1,
norm_adv_by_std_in_grpo: bool = True,
config: Optional[AlgoConfig] = None,
) -> tuple[Any, Any]:
"""Compute advantage estimates for policy optimization.
This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc.
The advantage estimates are used to guide policy optimization in RL algorithms.
Args:
data (DataProto): The data containing batched model outputs and inputs.
adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++).
gamma (float, optional): Discount factor for future rewards. Defaults to 1.0.
lam (float, optional): Lambda parameter for GAE. Defaults to 1.0.
num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1.
norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in
GRPO. Defaults to True.
        config (AlgoConfig, optional): Configuration for algorithm-specific settings. Defaults to None.
Returns:
tuple: A tuple containing:
- advantages: The computed advantage estimates.
- returns: The computed returns.
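
    Example:
        Conceptual sketch of the GRPO outcome advantage (not the verl
        implementation): sequence-level rewards are normalized within each
        ``uid`` group and then broadcast over the response tokens. Made-up numbers:

        >>> import torch
        >>> group_rewards = torch.tensor([1.0, 0.0])  # two rollouts of the same prompt
        >>> (group_rewards - group_rewards.mean()) / (group_rewards.std() + 1e-6)
        tensor([ 0.7071, -0.7071])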
"""
# prepare response group
if adv_estimator == AdvantageEstimator.GAE:
# Compute advantages and returns using Generalized Advantage Estimation (GAE)
advantages, returns = core_algos.compute_gae_advantage_return(
token_level_rewards=data.batch["token_level_rewards"],
values=data.batch["values"],
response_mask=data.batch["response_mask"],
gamma=gamma,
lam=lam,
)
# TODO (TQ): adapt core_algos.compute_pf_ppo_reweight_data function to support transfer queue
if config.get("use_pf_ppo", False):
data = core_algos.compute_pf_ppo_reweight_data(
data,
config.pf_ppo.get("reweight_method"),
config.pf_ppo.get("weight_pow"),
)
elif adv_estimator == AdvantageEstimator.GRPO:
# Initialize the mask for GRPO calculation
grpo_calculation_mask = data.batch["response_mask"]
# Call compute_grpo_outcome_advantage with parameters matching its definition
advantages, returns = core_algos.compute_grpo_outcome_advantage(
token_level_rewards=data.batch["token_level_rewards"],
response_mask=grpo_calculation_mask,
index=data.non_tensor_batch["uid"],
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
)
else:
        # handle all advantage estimator types other than GAE and GRPO
adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator)
adv_kwargs = {
"token_level_rewards": data.batch["token_level_rewards"],
"response_mask": data.batch["response_mask"],
"config": config,
}
if "uid" in data.non_tensor_batch: # optional
adv_kwargs["index"] = data.non_tensor_batch["uid"]
if "reward_baselines" in data.batch: # optional
adv_kwargs["reward_baselines"] = data.batch["reward_baselines"]
# calculate advantage estimator
advantages, returns = adv_estimator_fn(**adv_kwargs)
return advantages, returns
@tqbridge(put_data=False)
def compute_data_metrics_decorated(batch, use_critic: bool = True):
return compute_data_metrics(batch, use_critic)
@tqbridge(put_data=False)
def compute_timing_metrics_decorated(batch, timing_raw: dict[str, float]) -> dict[str, Any]:
return compute_timing_metrics(batch, timing_raw)
@tqbridge(put_data=False)
def compute_throughout_metrics_decorated(batch, timing_raw: dict[str, float], n_gpus: int) -> dict[str, Any]:
return compute_throughout_metrics(batch, timing_raw, n_gpus)
@tqbridge(put_data=False)
def calculate_debug_metrics_decorated(data):
from verl.utils.debug.metrics import calculate_debug_metrics
return calculate_debug_metrics(data)
class RayPPOTrainer:
"""Distributed PPO trainer using Ray for scalable reinforcement learning.
This trainer orchestrates distributed PPO training across multiple nodes and GPUs,
managing actor rollouts, critic training, and reward computation with Ray backend.
Supports various model architectures including FSDP, Megatron, vLLM, and SGLang integration.
"""
# TODO: support each role have individual ray_worker_group_cls,
# i.e., support different backend of different role
def __init__(
self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup,
processor=None,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
collate_fn=None,
train_sampler: Optional[Sampler] = None,
device_name=None,
):
"""
Initialize distributed PPO trainer with Ray backend.
Note that this trainer runs on the driver process on a single CPU/GPU node.
Args:
config: Configuration object containing training parameters.
tokenizer: Tokenizer used for encoding and decoding text.
role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes.
resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools.
ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup.
processor: Optional data processor, used for multimodal data
train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None.
val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None.
collate_fn: Function to collate data samples into batches.
train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None.
device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None.
"""
# Store the tokenizer for text processing
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
        assert self.hybrid_engine, "Currently, only the hybrid engine is supported"
if self.hybrid_engine:
assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}"
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = need_reference_policy(self.config)
self.use_rm = need_reward_model(self.config)
self.use_critic = need_critic(self.config)
self.ray_worker_group_cls = ray_worker_group_cls
self.device_name = device_name if device_name else self.config.trainer.device
self.validation_generations_logger = ValidationGenerationsLogger(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
)
lora_rank = config.actor_rollout_ref.model.get("lora", {}).get("rank", 0)
if lora_rank <= 0:
lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
# if ref_in_actor is True, the reference policy will be actor without lora applied
self.ref_in_actor = lora_rank > 0
# define in-reward KL control
        # KL loss control is currently not supported
if self.config.algorithm.use_kl_in_reward:
self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl)
self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)
self.tq_client = self._initialize_transferqueue()
def _initialize_transferqueue(self):
# 1. initialize TransferQueueStorage
if self.config.transfer_queue.storage_backend == "AsyncSimpleStorageManager":
train_data_size = (
self.config.data.train_batch_size
* self.config.transfer_queue.num_global_batch
* self.config.actor_rollout_ref.rollout.n
)
val_data_size = self.val_dataset_size * self.config.actor_rollout_ref.rollout.val_kwargs.n
total_storage_size = train_data_size + val_data_size
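            # e.g. train_batch_size=32, num_global_batch=2, rollout.n=4 plus 128 val prompts with
            # val n=1 gives 32*2*4 + 128*1 = 384 slots, split evenly across the storage units
            # created below (illustrative numbers only)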
self.data_system_storage_units = {}
storage_placement_group = get_placement_group(
self.config.transfer_queue.num_data_storage_units, num_cpus_per_actor=1
)
for storage_unit_rank in range(self.config.transfer_queue.num_data_storage_units):
storage_node = SimpleStorageUnit.options(
placement_group=storage_placement_group, placement_group_bundle_index=storage_unit_rank
).remote(
storage_unit_size=math.ceil(total_storage_size / self.config.transfer_queue.num_data_storage_units)
)
self.data_system_storage_units[storage_unit_rank] = storage_node
logging.info(f"SimpleStorageUnit #{storage_unit_rank} has been created.")
else:
raise NotImplementedError("Currently only support AsyncSimpleStorageManager backend in TransferQueue")
# 2. Initialize TransferQueueController (single controller only)
# Sampler usage instructions:
# For GRPO grouped sampling, you can initialize the controller with GRPOGroupNSampler:
# Option 1: Pass sampler class (will be instantiated automatically)
# self.data_system_controller = TransferQueueController.remote(sampler=GRPOGroupNSampler)
# Option 2: Pass sampler instance (if you need custom configuration)
# grpo_sampler = GRPOGroupNSampler()
# self.data_system_controller = TransferQueueController.remote(sampler=grpo_sampler)
# Then use sampling_config in get_meta calls:
# sampling_config={"n_samples_per_prompt": 4}
self.data_system_controller = TransferQueueController.remote()
logging.info("TransferQueueController has been created.")
# 3. register controller & storage and prepare necessary information
self.data_system_controller_info = process_zmq_server_info(self.data_system_controller)
if self.config.transfer_queue.storage_backend == "AsyncSimpleStorageManager":
self.data_system_storage_unit_infos = process_zmq_server_info(self.data_system_storage_units)
# Note: Need to generate a new DictConfig with allow_objects=True to preserve ZMQServerInfo instances
# (which contain socket connection details). Without this flag, OmegaConf would flatten these objects to dicts,
# breaking the transfer queue client initialization.
tq_config = OmegaConf.create({"transfer_queue": {}}, flags={"allow_objects": True})
tq_config.transfer_queue.controller_info = self.data_system_controller_info
if self.config.transfer_queue.storage_backend == "AsyncSimpleStorageManager":
tq_config.transfer_queue.storage_unit_infos = self.data_system_storage_unit_infos
self.config = OmegaConf.merge(tq_config, self.config)
# 4. create client
create_transferqueue_client(client_id="Trainer", config=self.config.transfer_queue, sync=True)
tq_client = get_transferqueue_client()
return tq_client
def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]):
"""
Creates the train and validation dataloaders.
"""
# TODO: we have to make sure the batch size is divisible by the dp size
from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler
if train_dataset is None:
train_dataset = create_rl_dataset(
self.config.data.train_files, self.config.data, self.tokenizer, self.processor
)
if val_dataset is None:
val_dataset = create_rl_dataset(
self.config.data.val_files, self.config.data, self.tokenizer, self.processor
)
self.train_dataset, self.val_dataset = train_dataset, val_dataset
self.val_dataset_size = len(val_dataset)
if train_sampler is None:
train_sampler = create_rl_sampler(self.config.data, self.train_dataset)
if collate_fn is None:
from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn
collate_fn = default_collate_fn
num_workers = self.config.data["dataloader_num_workers"]
self.train_dataloader = StatefulDataLoader(
dataset=self.train_dataset,
batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size),
num_workers=num_workers,
drop_last=True,
collate_fn=collate_fn,
sampler=train_sampler,
)
val_batch_size = self.config.data.val_batch_size # Prefer config value if set
if val_batch_size is None:
val_batch_size = len(self.val_dataset)
self.val_batch_size = val_batch_size
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
batch_size=val_batch_size,
num_workers=num_workers,
shuffle=self.config.data.get("validation_shuffle", True),
drop_last=False,
collate_fn=collate_fn,
)
assert len(self.train_dataloader) >= 1, "Train dataloader is empty!"
assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!"
print(
f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: "
f"{len(self.val_dataloader)}"
)
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.trainer.total_training_steps is not None:
total_training_steps = self.config.trainer.total_training_steps
self.total_training_steps = total_training_steps
print(f"Total training steps: {self.total_training_steps}")
try:
OmegaConf.set_struct(self.config, True)
with open_dict(self.config):
if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"):
self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps
if OmegaConf.select(self.config, "critic.optim"):
self.config.critic.optim.total_training_steps = total_training_steps
except Exception as e:
print(f"Warning: Could not set total_training_steps in config. Structure missing? Error: {e}")
def _dump_generations(self, inputs, outputs, gts, scores, reward_extra_infos_dict, dump_path):
"""Dump rollout/validation samples as JSONL."""
os.makedirs(dump_path, exist_ok=True)
filename = os.path.join(dump_path, f"{self.global_steps}.jsonl")
n = len(inputs)
base_data = {
"input": inputs,
"output": outputs,
"gts": gts,
"score": scores,
"step": [self.global_steps] * n,
}
for k, v in reward_extra_infos_dict.items():
if len(v) == n:
base_data[k] = v
lines = []
for i in range(n):
entry = {k: v[i] for k, v in base_data.items()}
lines.append(json.dumps(entry, ensure_ascii=False))
with open(filename, "w") as f:
f.write("\n".join(lines) + "\n")
print(f"Dumped generations to {filename}")
def _log_rollout_data(
self, log_rollout_meta: BatchMeta, reward_extra_infos_dict: dict, timing_raw: dict, rollout_data_dir: str
):
"""
Log rollout data to disk.
Args:
log_rollout_meta (BatchMeta): The batch_meta of rollout data
reward_extra_infos_dict (dict): Additional reward information to log
timing_raw (dict): Timing information for profiling
rollout_data_dir (str): Directory path to save the rollout data
"""
with marked_timer("dump_rollout_generations", timing_raw, color="green"):
data = self.tq_client.get_data(log_rollout_meta)
inputs = self.tokenizer.batch_decode(data["prompts"], skip_special_tokens=True)
outputs = self.tokenizer.batch_decode(data["responses"], skip_special_tokens=True)
scores = data["token_level_scores"].sum(-1).cpu().tolist()
sample_gts = [item.get("ground_truth", None) for item in data.get("reward_model", {})]
reward_extra_infos_to_dump = reward_extra_infos_dict.copy()
if "request_id" in log_rollout_meta.field_names:
                reward_extra_infos_to_dump.setdefault(
"request_id",
data["request_id"].tolist(),
)
self._dump_generations(
inputs=inputs,
outputs=outputs,
gts=sample_gts,
scores=scores,
reward_extra_infos_dict=reward_extra_infos_to_dump,
dump_path=rollout_data_dir,
)
def _maybe_log_val_generations(self, inputs, outputs, scores):
"""Log a table of validation samples to the configured logger (wandb or swanlab)"""
generations_to_log = self.config.trainer.log_val_generations
if generations_to_log == 0:
return
import numpy as np
# Create tuples of (input, output, score) and sort by input text
samples = list(zip(inputs, outputs, scores, strict=True))
samples.sort(key=lambda x: x[0]) # Sort by input text
# Use fixed random seed for deterministic shuffling
rng = np.random.RandomState(42)
rng.shuffle(samples)
# Take first N samples after shuffling
samples = samples[:generations_to_log]
# Log to each configured logger
self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps)
def _get_gen_batch(self, batch: DataProto) -> DataProto:
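        """Split generation-only inputs out of ``batch``.

        Reward-related non-tensor keys (data_source, reward_model, extra_info, uid)
        stay in ``batch`` and are also copied into the returned gen batch so the
        agent loop can compute scores.
        """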
reward_keys = set({"data_source", "reward_model", "extra_info", "uid"}) & batch.non_tensor_batch.keys()
# pop those keys for generation
batch_keys_to_pop = []
non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys()) - reward_keys
gen_batch = batch.pop(
batch_keys=batch_keys_to_pop,
non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop),
)
# For agent loop, we need reward model keys to compute score.
gen_batch.non_tensor_batch.update(batch.non_tensor_batch)
return gen_batch
def _validate(self):
data_source_lst = []
reward_extra_infos_dict: dict[str, list] = defaultdict(list)
# Lists to collect samples for the table
sample_inputs = []
sample_outputs = []
sample_gts = []
sample_scores = []
sample_turns = []
sample_uids = []
for test_data in self.val_dataloader:
if "uid" not in test_data.keys():
test_data["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(test_data["raw_prompt"]))], dtype=object
)
# repeat test data
repeated_test_data = self.repeat_dict(
test_data, repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True
)
test_batch: TensorDict = self.dict_to_tensordict(repeated_test_data)
# we only do validation on rule-based rm
if self.config.reward.reward_model.enable and test_batch[0]["reward_model"]["style"] == "model":
return {}
batch_meta = self.tq_client.put(data=test_batch, partition_id=f"val_{self.global_steps - 1}")
batch_meta.update_extra_info(
{
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
"recompute_log_prob": False,
"do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
"validate": True,
"global_steps": self.global_steps,
}
)
print(f"batch_meta extra_info: {batch_meta.extra_info}")
# TODO: (TQ) Support padding and unpadding to make DataProto divisible by dp_size with TransferQueue
test_output_gen_meta = self.async_rollout_manager.generate_sequences(batch_meta)
batch_meta = batch_meta.union(test_output_gen_meta)
print("validation generation end")
# Store generated outputs
test_response_meta = batch_meta.select_fields(["prompts", "responses", "uid", "reward_model"])
data = self.tq_client.get_data(test_response_meta)
output_ids = data["responses"]
output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids]
sample_outputs.extend(output_texts)
# TODO: Can we keep special tokens except for padding tokens?
input_ids = data["prompts"]
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]
sample_inputs.extend(input_texts)
sample_uids.extend(data["uid"])
ground_truths = [item.get("ground_truth", None) for item in data.get("reward_model", {})]
sample_gts.extend(ground_truths)
reward_tensor, reward_extra_info = compute_reward_decorated(batch_meta)
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_extra_infos_dict["reward"].extend(scores)
print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}")
for key, lst in reward_extra_info.items():
reward_extra_infos_dict[key].extend(lst)
print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}")
# collect num_turns of each prompt
if "__num_turns__" in batch_meta.field_names:
data = self.tq_client.get_data(batch_meta.select_fields(["__num_turns__"]))
sample_turns.append(data["__num_turns__"])
data_source = ["unknown"] * reward_tensor.shape[0]
if "data_source" in batch_meta.field_names:
data_source_meta = batch_meta.select_fields(["data_source"])
data = self.tq_client.get_data(data_source_meta)
data_source = data["data_source"]
data_source_lst.append(data_source)
self.tq_client.clear_samples(batch_meta)
self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores)
# dump generations
val_data_dir = self.config.trainer.get("validation_data_dir", None)
if val_data_dir:
self._dump_generations(
inputs=sample_inputs,
outputs=sample_outputs,
gts=sample_gts,
scores=sample_scores,
reward_extra_infos_dict=reward_extra_infos_dict,
dump_path=val_data_dir,
)
for key_info, lst in reward_extra_infos_dict.items():
assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"
data_sources = np.concatenate(data_source_lst, axis=0)
data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict)
metric_dict = {}
for data_source, var2metric2val in data_src2var2metric2val.items():
core_var = "acc" if "acc" in var2metric2val else "reward"
for var_name, metric2val in var2metric2val.items():
n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
for metric_name, metric_val in metric2val.items():
if (
(var_name == core_var)
and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
and (f"@{n_max}" in metric_name)
):
metric_sec = "val-core"
else:
metric_sec = "val-aux"
pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
metric_dict[pfx] = metric_val
if len(sample_turns) > 0:
sample_turns = np.concatenate(sample_turns)
metric_dict["val-aux/num_turns/min"] = sample_turns.min()
metric_dict["val-aux/num_turns/max"] = sample_turns.max()
metric_dict["val-aux/num_turns/mean"] = sample_turns.mean()
return metric_dict
def init_workers(self):
"""Initialize distributed training workers using Ray backend.
Creates:
1. Ray resource pools from configuration
2. Worker groups for each role (actor, critic, etc.)
"""
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
# create actor and rollout
if self.hybrid_engine:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
actor_rollout_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.ActorRollout],
config=self.config.actor_rollout_ref,
role="actor_rollout",
)
self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
else:
raise NotImplementedError
# create critic
if self.use_critic:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
critic_cfg = omega_conf_to_dataclass(self.config.critic)
critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg)
self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls
# create reference policy if needed
if self.use_reference_policy:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
ref_policy_cls = RayClassWithInitArgs(
self.role_worker_mapping[Role.RefPolicy],
config=self.config.actor_rollout_ref,
role="ref",
)
self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`.
# Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg = {}
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
# Only require nsight worker options when tool is nsys
if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
assert (
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
is not None
), "worker_nsight_options must be set when using nsys with profile_steps"
wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
)
wg_kwargs["device_name"] = self.device_name
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(
resource_pool=resource_pool,
ray_cls_with_init=worker_dict_cls,
**wg_kwargs,
)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
if self.use_critic:
self.critic_wg = all_wg["critic"]
self.critic_wg.init_model()
if self.use_reference_policy and not self.ref_in_actor:
self.ref_policy_wg = all_wg["ref"]
self.ref_policy_wg.init_model()
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = all_wg["actor_rollout"]
self.actor_rollout_wg.init_model()
# set transferqueue server info for each worker
for _, wg in all_wg.items():
wg.create_transferqueue_client(self.config)
# create reward loop manager
from verl.experimental.reward_loop import RewardLoopManager
        # initialize reward loop manager
# reward model (colocate or standalone): get resource_pool
# no reward model: resource_pool = None
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) if self.use_rm else None
self.reward_loop_manager = RewardLoopManager(
config=self.config,
rm_resource_pool=resource_pool,
)
# create async rollout manager and request scheduler
self.async_rollout_mode = False
if self.config.actor_rollout_ref.rollout.mode == "async":
from .agent_loop import AgentLoopManager
self.async_rollout_mode = True
enable_agent_reward_loop = not self.use_rm or self.config.reward.reward_model.enable_resource_pool
reward_loop_worker_handles = (
self.reward_loop_manager.reward_loop_workers if enable_agent_reward_loop else None
)
self.async_rollout_manager = AgentLoopManager(
config=self.config,
worker_group=self.actor_rollout_wg,
reward_loop_worker_handles=reward_loop_worker_handles,
)
self.checkpoint_manager = CheckpointEngineManager(
backend=self.config.actor_rollout_ref.rollout.checkpoint_engine.backend,
trainer=self.actor_rollout_wg,
replicas=self.async_rollout_manager.rollout_replicas,
)
# sleep all replicas to load checkpoint
self.checkpoint_manager.sleep_replicas()
# TODO (TQ): initialize tq during worker init when enable TQ switch is stable
self.async_rollout_manager.create_transferqueue_client_for_workers()
def _save_checkpoint(self):
from verl.utils.fs import local_mkdir_safe
# path: given_path + `/global_step_{global_steps}` + `/actor`
local_global_step_folder = os.path.join(
self.config.trainer.default_local_dir, f"global_step_{self.global_steps}"
)
print(f"local_global_step_folder: {local_global_step_folder}")
actor_local_path = os.path.join(local_global_step_folder, "actor")
actor_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor")
)
remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False)
if remove_previous_ckpt_in_save:
print(
"Warning: remove_previous_ckpt_in_save is deprecated,"
+ " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead"
)
max_actor_ckpt_to_keep = (
self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
max_critic_ckpt_to_keep = (
self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1
)
self.actor_rollout_wg.save_checkpoint(
actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep
)
if self.use_critic:
critic_local_path = os.path.join(local_global_step_folder, "critic")
critic_remote_path = (
None
if self.config.trainer.default_hdfs_dir is None
else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic")
)
self.critic_wg.save_checkpoint(
critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep
)
# save dataloader
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
# latest checkpointed iteration tracker (for atomic usage)
local_latest_checkpointed_iteration = os.path.join(
self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
)
with open(local_latest_checkpointed_iteration, "w") as f:
f.write(str(self.global_steps))
def _load_checkpoint(self):
if self.config.trainer.resume_mode == "disable":
return 0
# load from hdfs
if self.config.trainer.default_hdfs_dir is not None:
raise NotImplementedError("load from hdfs is not implemented yet")
else:
checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path
if not os.path.isabs(checkpoint_folder):
working_dir = os.getcwd()
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest
# find global_step_folder
if self.config.trainer.resume_mode == "auto":
if global_step_folder is None:
print("Training from scratch")
return 0
else:
if self.config.trainer.resume_mode == "resume_path":
assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type"
assert "global_step_" in self.config.trainer.resume_from_path, (
"resume ckpt must specify the global_steps"
)
global_step_folder = self.config.trainer.resume_from_path
if not os.path.isabs(global_step_folder):
working_dir = os.getcwd()
global_step_folder = os.path.join(working_dir, global_step_folder)
print(f"Load from checkpoint folder: {global_step_folder}")
# set global step
self.global_steps = int(global_step_folder.split("global_step_")[-1])
print(f"Setting global step to {self.global_steps}")
print(f"Resuming from {global_step_folder}")
actor_path = os.path.join(global_step_folder, "actor")
critic_path = os.path.join(global_step_folder, "critic")
# load actor
self.actor_rollout_wg.load_checkpoint(
actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
# load critic
if self.use_critic:
self.critic_wg.load_checkpoint(
critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load
)
# load dataloader,
# TODO: from remote not implemented yet
dataloader_local_path = os.path.join(global_step_folder, "data.pt")
if os.path.exists(dataloader_local_path):
dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
else:
print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch")
def _start_profiling(self, do_profile: bool) -> None:
"""Start profiling for all worker groups if profiling is enabled."""
if do_profile:
self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps)
if self.use_reference_policy:
self.ref_policy_wg.start_profile(profile_step=self.global_steps)
if self.use_critic:
self.critic_wg.start_profile(profile_step=self.global_steps)
def _stop_profiling(self, do_profile: bool) -> None:
"""Stop profiling for all worker groups if profiling is enabled."""
if do_profile:
self.actor_rollout_wg.stop_profile()
if self.use_reference_policy:
self.ref_policy_wg.stop_profile()
if self.use_critic:
self.critic_wg.stop_profile()
def _balance_batch(
self, batch: BatchMeta, tq_client, metrics, logging_prefix="global_seqlen", keep_minibatch=False
):
"""Reorder the batchmeta on single controller such that each dp rank gets similar total tokens"""
data = tq_client.get_data(batch)
attention_mask = data["attention_mask"]
batch_size = attention_mask.shape[0]
global_seqlen_lst = data["attention_mask"].view(batch_size, -1).sum(-1) # (train_batch_size,)
global_seqlen_lst = calculate_workload(global_seqlen_lst)
world_size = self.actor_rollout_wg.world_size
if keep_minibatch:
# Decouple the DP balancing and mini-batching.
minibatch_size = self.config.actor_rollout_ref.actor.get("ppo_mini_batch_size", None)
if minibatch_size is None:
raise ValueError("'ppo_mini_batch_size' must be set in actor config when 'keep_minibatch' is True.")
minibatch_num = len(global_seqlen_lst) // minibatch_size
global_partition_lst = [[] for _ in range(world_size)]
for i in range(minibatch_num):
rearrange_minibatch_lst = get_seqlen_balanced_partitions(
global_seqlen_lst[i * minibatch_size : (i + 1) * minibatch_size],
k_partitions=world_size,
equal_size=True,
)
for j, part in enumerate(rearrange_minibatch_lst):
global_partition_lst[j].extend([x + minibatch_size * i for x in part])
else:
global_partition_lst = get_seqlen_balanced_partitions(
global_seqlen_lst, k_partitions=world_size, equal_size=True
)
# Place smaller micro-batches at both ends to reduce the bubbles in pipeline parallel.
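        # e.g. a partition sorted by workload as [s0, s1, s2, s3] (ascending) is reordered to
        # [s0, s2, s3, s1], so the two lightest micro-batches sit at the two ends of this
        # rank's slice (illustrative indices only)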
for idx, partition in enumerate(global_partition_lst):
partition.sort(key=lambda x: (global_seqlen_lst[x], x))
ordered_partition = partition[::2] + partition[1::2][::-1]
global_partition_lst[idx] = ordered_partition
# reorder based on index. The data will be automatically equally partitioned by dispatch function
global_idx = [j for partition in global_partition_lst for j in partition]
global_balance_stats = log_seqlen_unbalance(
seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix
)
metrics.update(global_balance_stats)
return global_idx
@classmethod
def repeat_dict(
cls, batch_dict: dict[str, torch.Tensor | np.ndarray], repeat_times=2, interleave=True
) -> dict[str, torch.Tensor | np.ndarray]:
"""
Repeat the batch dict a specified number of times.
        Args:
            batch_dict (dict): Batch of tensors / numpy arrays to repeat along the batch dim.
            repeat_times (int): Number of times to repeat the data.
            interleave (bool): Whether to interleave the repeated data.
Returns:
dict: A new dict with repeated data.
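
        Example:
            A minimal sketch (illustrative values only):

            >>> import torch
            >>> batch = {"x": torch.tensor([1, 2])}
            >>> RayPPOTrainer.repeat_dict(batch, repeat_times=2, interleave=True)["x"]
            tensor([1, 1, 2, 2])
            >>> RayPPOTrainer.repeat_dict(batch, repeat_times=2, interleave=False)["x"]
            tensor([1, 2, 1, 2])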
"""
if repeat_times == 1:
return batch_dict
repeated_batch_dict = {}
if batch_dict:
if interleave:
# Interleave the data
for key, val in batch_dict.items():
if isinstance(val, torch.Tensor):
repeated_batch_dict[key] = val.repeat_interleave(repeat_times, dim=0)
elif isinstance(val, np.ndarray):
repeated_batch_dict[key] = np.repeat(val, repeat_times, axis=0)
else:
raise ValueError(f"Unsupported type in data {type(val)}")
else:
# Stack the data
for key, val in batch_dict.items():
if isinstance(val, torch.Tensor):
repeated_batch_dict[key] = (
val.unsqueeze(0).expand(repeat_times, *val.shape).reshape(-1, *val.shape[1:])
)
elif isinstance(val, np.ndarray):
repeated_batch_dict[key] = np.tile(val, (repeat_times,) + (1,) * (val.ndim - 1))
else:
raise ValueError(f"Unsupported type in data {type(val)}")
return repeated_batch_dict
@classmethod
def dict_to_tensordict(cls, data: dict[str, torch.Tensor | np.ndarray]) -> TensorDict:
"""
Create a TensorDict from a dict of tensors and non_tensors.
Note that this requires tensordict version at least 0.10
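
        Example:
            A minimal sketch (illustrative values only), assuming tensordict >= 0.10 so
            the numpy object array is stored as non-tensor data:

            >>> import numpy as np
            >>> import torch
            >>> td = RayPPOTrainer.dict_to_tensordict(
            ...     {"responses": torch.zeros(2, 3), "uid": np.array(["a", "b"], dtype=object)}
            ... )
            >>> td.batch_size
            torch.Size([2])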
"""
assert parse_version(tensordict.__version__) >= parse_version("0.10"), (
"Storing non-tensor data in TensorDict at least requires tensordict version 0.10"
)
tensors_batch = {}
batch_size = None
for key, val in data.items():
if isinstance(val, torch.Tensor | np.ndarray):
tensors_batch[key] = val
else:
raise ValueError(f"Unsupported type in data {type(val)}")
if batch_size is None:
batch_size = len(val)
else:
assert len(val) == batch_size
if batch_size is None:
batch_size = []
else:
batch_size = [batch_size]
return TensorDict(tensors_batch, batch_size=batch_size)
def fit(self):
"""
The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
        to construct the PPO dataflow.
The light-weight advantage computation is done on the driver process.
"""
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint and update weights before doing anything
self._load_checkpoint()
self.checkpoint_manager.update_weights()
# perform validation before training
# currently, we only support validation using the reward_function.
if self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
rollout_skip.wrap_generate_sequences()
# add tqdm
progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
last_val_metrics = None
self.max_steps_duration = 0
prev_step_profile = False
curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
next_step_profile = False
for epoch in range(self.config.trainer.total_epochs):
for batch_dict in self.train_dataloader:
metrics = {}
timing_raw = {}
base_get_meta_kwargs = dict(
batch_size=self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n,
partition_id=f"train_{self.global_steps - 1}", # self.global_steps starts from 1
)
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not prev_step_profile and curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
# add uid to batch
batch_dict["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(batch_dict["raw_prompt"]))], dtype=object
)
# When n > 1, repeat input data before putting to data system, simulating DataProto repeat.
repeated_batch_dict = self.repeat_dict(
batch_dict, repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
batch: TensorDict = self.dict_to_tensordict(repeated_batch_dict)
gen_meta = self.tq_client.put(data=batch, partition_id=f"train_{self.global_steps - 1}")
# pass global_steps to trace
gen_meta.set_extra_info("global_steps", self.global_steps)
is_last_step = self.global_steps >= self.total_training_steps
with marked_timer("step", timing_raw):
# generate a batch
with marked_timer("gen", timing_raw, color="red"):
gen_output_meta = self.async_rollout_manager.generate_sequences(gen_meta)
self.checkpoint_manager.sleep_replicas()
timing_raw.update(gen_output_meta.extra_info["timing"])
gen_output_meta.extra_info.pop("timing", None)
# TODO (TQ): support transfer queue
# if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
# if self.reward_fn is None:
# raise ValueError("A reward_fn is required for REMAX advantage estimation.")
#
# with marked_timer("gen_max", timing_raw, color="purple"):
# gen_baseline_meta = deepcopy(gen_meta)
# gen_baseline_meta.extra_info["do_sample"] = False
# if not self.async_rollout_mode:
# gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_meta)
# else:
# gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_meta)
# batch = batch.union(gen_baseline_output)
# reward_baseline_tensor = self.reward_fn(batch)
# reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)
#
# batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
#
# batch.batch["reward_baselines"] = reward_baseline_tensor
#
# del gen_baseline_batch, gen_baseline_output
batch_meta: BatchMeta = gen_meta.union(gen_output_meta)
if "response_mask" not in batch_meta.field_names:
response_mask_meta = self.tq_client.get_meta(
data_fields=["responses", "attention_mask"],
task_name="compute_response_mask",
**base_get_meta_kwargs,
)
response_mask_output_meta = compute_response_mask(response_mask_meta, self.tq_client)
batch_meta = batch_meta.union(response_mask_output_meta)
# Balance the number of valid tokens across DP ranks.
# NOTE: This usually changes the order of data in the `batch`,
# which won't affect the advantage calculation (since it's based on uid),
# but might affect the loss calculation (due to the change of mini-batching).
# TODO: Decouple the DP balancing and mini-batching.
attention_mask_meta = batch_meta.select_fields(["attention_mask"])
balanced_idx = None
if self.config.trainer.balance_batch:
balanced_idx = self._balance_batch(attention_mask_meta, self.tq_client, metrics=metrics)
batch_meta.reorder(balanced_idx)
# compute global_valid tokens
data = self.tq_client.get_data(attention_mask_meta)
batch_meta.extra_info["global_token_num"] = torch.sum(data["attention_mask"], dim=-1).tolist()
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
if self.use_rm and "rm_scores" not in batch_meta.field_names:
reward_meta = self.rm_wg.compute_rm_score(batch_meta)
batch_meta = batch_meta.union(reward_meta)
compute_reward_fields = [
"responses",
"prompts",
"attention_mask",
"reward_model",
"data_source",
]
if "rm_scores" in batch_meta.field_names:
compute_reward_fields.extend(
["rm_scores", *set(batch_meta.extra_info["reward_extra_keys"])]
)
reward_tensor, reward_extra_infos_dict = compute_reward_decorated(batch_meta)
compute_reward_meta = batch_meta.select_fields(compute_reward_fields)
batch_meta = batch_meta.union(compute_reward_meta)
# recompute old_log_probs
with marked_timer("old_log_prob", timing_raw, color="blue"):
old_log_prob_meta_fields = [
"input_ids",
"attention_mask",
"position_ids",
"prompts",
"responses",
"response_mask",
"data_source",
"reward_model",
"extra_info",
"uid",
"index",
"tools_kwargs",
"interaction_kwargs",
"ability",
]
old_log_prob_meta = batch_meta.select_fields(old_log_prob_meta_fields)
old_log_prob_output_meta = self.actor_rollout_wg.compute_log_prob(old_log_prob_meta)
batch_meta = batch_meta.union(old_log_prob_output_meta)
data = self.tq_client.get_data(old_log_prob_output_meta)
entropys = data["entropys"]
response_masks = data["response_mask"]
actor_config = self.config.actor_rollout_ref.actor
entropy_agg = agg_loss(
loss_mat=entropys,
loss_mask=response_masks,
loss_agg_mode=actor_config.loss_agg_mode,
loss_scale_factor=actor_config.loss_scale_factor,
)
old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()}
metrics.update(old_log_prob_metrics)
if "rollout_log_probs" in batch_meta.field_names:
# TODO: we may want to add diff of probs too.
calculate_debug_metrics_fields = ["rollout_log_probs", "old_log_probs", "responses"]
if "response_mask" in batch_meta.field_names:
calculate_debug_metrics_fields.append("response_mask")
if "attention_mask" in batch_meta.field_names:
calculate_debug_metrics_fields.append("attention_mask")
calculate_debug_metrics_meta = batch_meta.select_fields(calculate_debug_metrics_fields)
metrics.update(calculate_debug_metrics_decorated(calculate_debug_metrics_meta))
if self.use_reference_policy:
# compute reference log_prob
ref_log_prob_fields = [
"input_ids",
"attention_mask",
"position_ids",
"prompts",
"responses",
"response_mask",
"old_log_probs",
"data_source",
"reward_model",
"extra_info",
"uid",
"index",
"tools_kwargs",
"interaction_kwargs",
"ability",
]
ref_log_prob_meta = batch_meta.select_fields(ref_log_prob_fields)
with marked_timer("ref", timing_raw, color="olive"):
if not self.ref_in_actor:
ref_log_prob_output_meta = self.ref_policy_wg.compute_ref_log_prob(ref_log_prob_meta)
else:
ref_log_prob_output_meta = self.actor_rollout_wg.compute_ref_log_prob(ref_log_prob_meta)
batch_meta = batch_meta.union(ref_log_prob_output_meta)
# compute values
if self.use_critic:
with marked_timer("values", timing_raw, color="cyan"):
values_meta = self.critic_wg.compute_values(batch_meta)
batch_meta = batch_meta.union(values_meta)
with marked_timer("adv", timing_raw, color="brown"):
# we combine with rule-based rm
reward_extra_infos_dict: dict[str, list]
reward_td = TensorDict({"token_level_scores": reward_tensor}, batch_size=reward_tensor.size(0))
batch_meta = self.tq_client.put(data=reward_td, metadata=batch_meta)
if reward_extra_infos_dict:
reward_extra_infos_dict_new = {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
reward_extra_infos_td = self.dict_to_tensordict(reward_extra_infos_dict_new)
batch_meta = self.tq_client.put(data=reward_extra_infos_td, metadata=batch_meta)
# compute rewards. apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward:
apply_kl_penalty_fields = [
"response_mask",
"token_level_scores",
"old_log_probs",
"ref_log_prob",
]
apply_kl_penalty_meta = batch_meta.select_fields(apply_kl_penalty_fields)
token_level_rewards, kl_metrics = apply_kl_penalty(
apply_kl_penalty_meta,
kl_ctrl=self.kl_ctrl_in_reward,
kl_penalty=self.config.algorithm.kl_penalty,
)
token_level_rewards_td = TensorDict(
{"token_level_rewards": token_level_rewards}, batch_size=token_level_rewards.size(0)
)
apply_kl_penalty_meta = self.tq_client.put(
data=token_level_rewards_td, metadata=apply_kl_penalty_meta
)
metrics.update(kl_metrics)
batch_meta = batch_meta.union(apply_kl_penalty_meta)
else:
token_level_scores_meta = batch_meta.select_fields(["token_level_scores"])
data = self.tq_client.get_data(token_level_scores_meta)
token_level_rewards_td = TensorDict(
{"token_level_rewards": data["token_level_scores"]},
batch_size=data["token_level_scores"].size(0),
)
token_level_scores_meta = self.tq_client.put(
data=token_level_rewards_td, metadata=token_level_scores_meta
)
batch_meta = batch_meta.union(token_level_scores_meta)
# compute advantages, executed on the driver process
norm_adv_by_std_in_grpo = self.config.algorithm.get(
"norm_adv_by_std_in_grpo", True
) # GRPO adv normalization factor
assert "response_mask" in batch_meta.field_names, (
f"`response_mask` must be in batch_meta {batch_meta.field_names} for advantage computation"
)
compute_advantage_fields = [
"response_mask",
"token_level_rewards",
]
if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE:
compute_advantage_fields.append("values")
elif self.config.algorithm.adv_estimator == AdvantageEstimator.GRPO:
compute_advantage_fields.append("uid")
else:
if "uid" in batch_meta.field_names:
compute_advantage_fields.append("uid")
if "reward_baselines" in batch_meta.field_names:
compute_advantage_fields.append("reward_baselines")
compute_advantage_meta = batch_meta.select_fields(compute_advantage_fields)
advantages, returns = compute_advantage(
compute_advantage_meta,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n,
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
config=self.config.algorithm,
)
advantages_td = TensorDict(
{"advantages": advantages, "returns": returns}, batch_size=advantages.size(0)
)
compute_advantage_meta = self.tq_client.put(data=advantages_td, metadata=compute_advantage_meta)
batch_meta = batch_meta.union(compute_advantage_meta)
# update critic
if self.use_critic:
with marked_timer("update_critic", timing_raw, color="pink"):
critic_output_meta = self.critic_wg.update_critic(batch_meta)
batch_meta = batch_meta.union(critic_output_meta)
critic_output_metrics = reduce_metrics(critic_output_meta.extra_info["metrics"])
metrics.update(critic_output_metrics)
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with marked_timer("update_actor", timing_raw, color="red"):
batch_meta.extra_info["multi_turn"] = (
self.config.actor_rollout_ref.rollout.multi_turn.enable
)
update_actor_fields = [
"input_ids",
"attention_mask",
"position_ids",
"prompts",
"responses",
"response_mask",
"old_log_probs",
"ref_log_prob",
"advantages",
"returns",
"token_level_rewards",
"token_level_scores",
"data_source",
"reward_model",
"extra_info",
"uid",
"index",
"tools_kwargs",
"interaction_kwargs",
"ability",
]
update_actor_meta = batch_meta.select_fields(update_actor_fields)
update_actor_meta.set_extra_info(
"global_token_num", batch_meta.get_extra_info("global_token_num")
)
update_actor_meta.set_extra_info("temperature", batch_meta.get_extra_info("temperature"))
actor_output_meta = self.actor_rollout_wg.update_actor(update_actor_meta)
batch_meta = batch_meta.union(actor_output_meta)
# update weights from trainer to rollout
with marked_timer("update_weights", timing_raw, color="red"):
self.checkpoint_manager.update_weights()
actor_output_metrics = reduce_metrics(actor_output_meta.extra_info["metrics"])
metrics.update(actor_output_metrics)
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
log_rollout_fields = ["prompts", "responses", "token_level_scores", "reward_model"]
if "request_id" in batch_meta.field_names:
log_rollout_fields.append("request_id")
log_rollout_meta = batch_meta.select_fields(log_rollout_fields)
self._log_rollout_data(log_rollout_meta, reward_extra_infos_dict, timing_raw, rollout_data_dir)
# TODO: validate
if self.config.trainer.test_freq > 0 and (
is_last_step or self.global_steps % self.config.trainer.test_freq == 0
):
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if is_last_step:
last_val_metrics = val_metrics
metrics.update(val_metrics)
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
self._save_checkpoint()
with marked_timer("stop_profile", timing_raw):
next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
curr_step_profile and not next_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
prev_step_profile = curr_step_profile
curr_step_profile = next_step_profile
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# training metrics
metrics.update(
{
"training/global_step": self.global_steps,
"training/epoch": epoch,
}
)
# collect metrics
compute_data_metrics_fields = [
"token_level_rewards",
"token_level_scores",
"advantages",
"returns",
"responses",
"attention_mask",
"response_mask",
]
if "__num_turns__" in batch_meta.field_names:
compute_data_metrics_fields.append("__num_turns__")
if "tool_call_counts" in batch_meta.field_names:
compute_data_metrics_fields.append("tool_call_counts")
compute_data_metrics_meta = batch_meta.select_fields(compute_data_metrics_fields)
compute_data_metrics_meta.reorder(balanced_idx)
metrics.update(
compute_data_metrics_decorated(batch=compute_data_metrics_meta, use_critic=self.use_critic)
)
compute_timing_metrics_fields = ["responses", "attention_mask"]
compute_timing_metrics_meta = batch_meta.select_fields(compute_timing_metrics_fields)
compute_timing_metrics_meta.reorder(balanced_idx)
metrics.update(
compute_timing_metrics_decorated(batch=compute_timing_metrics_meta, timing_raw=timing_raw)
)
compute_throughout_metrics_meta = BatchMeta(
samples=[],
extra_info={"global_token_num": batch_meta.get_extra_info("global_token_num")},
)
# TODO: implement actual tflpo and theoretical tflpo
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(
compute_throughout_metrics_decorated(
batch=compute_throughout_metrics_meta, timing_raw=timing_raw, n_gpus=n_gpus
)
)
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
# TODO (TQ) :support transfer queue
self.train_dataloader.sampler.update(batch=batch)
self.tq_client.clear_samples(batch_meta)
# TODO: make a canonical logger that supports various backend
logger.log(data=metrics, step=self.global_steps)
progress_bar.update(1)
self.global_steps += 1
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
if is_last_step:
pprint(f"Final validation metrics: {last_val_metrics}")
progress_bar.close()
return
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
# TODO (TQ): support transfer queue
self.train_dataset.on_batch_end(batch=batch)
|
verl__experimental__transfer_queue__ray_trainer.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Single Process Actor
"""
import logging
import torch
from tensordict.base import TensorDictBase
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
import verl.utils.torch_functional as verl_F
from verl.protocol import DataProto
from verl.trainer.ppo import core_algos
from verl.utils.device import get_device_id, get_device_name
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch
from verl.utils.torch_functional import logprobs_from_logits
from verl.workers.actor import BasePPOActor
logger = logging.getLogger(__name__)
__all__ = ["RobDataParallelPPOActor"]
class RobDataParallelPPOActor(BasePPOActor):
def __init__(
self,
config,
actor_module: nn.Module,
actor_optimizer: torch.optim.Optimizer = None,
):
"""When optimizer is None, it is Reference Policy"""
super().__init__(config)
self.actor_module = actor_module
self.actor_optimizer = actor_optimizer
self.use_remove_padding = self.config.get("use_remove_padding", False)
logger.info(f"Actor use_remove_padding={self.use_remove_padding}")
logger.info(f"PRM use dynamic bsz={self.config.get('use_dynamic_bsz', False)}")
self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size
self.use_ulysses_sp = False # self.ulysses_sequence_parallel_size > 1
self.compute_entropy_from_logits = torch.compile(verl_F.entropy_from_logits, dynamic=True)
def process_tensor(self, tensor, pad_id):
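        """Strip padding columns that are identical across the batch.

        Assumes every row shares the same padding layout (validated below);
        returns the de-padded tensor and the number of valid positions.
        """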
mask = tensor != pad_id
if not torch.all(mask == mask[0:1], dim=1).all():
raise ValueError("Padding error!")
base_mask = mask[0]
valid_len = base_mask.sum().item()
return tensor[:, base_mask], valid_len
def generate_traj_mask(self, end_step, traj_len):
"""
        Args:
            end_step: (batch_size,), index of the last valid step of each trajectory.
            traj_len: int, total number of steps in the trajectory.
        Returns:
            mask: (batch_size, traj_len), True for step indices strictly less than ``end_step``.
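
        Example (illustrative values):
            >>> import torch
            >>> end_step, traj_len = torch.tensor([2, 0]), 3
            >>> torch.arange(traj_len).unsqueeze(0) < end_step.unsqueeze(1)
            tensor([[ True,  True, False],
                    [False, False, False]])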
"""
steps = torch.arange(traj_len, device=end_step.device) # (traj_len,)
steps_expanded = steps.unsqueeze(0).expand(end_step.size(0), -1)
mask = steps_expanded < end_step.unsqueeze(1) # (batch_size, traj_len)
return mask
def apply_mask_with_grad_control(self, log_probs, entropy, mask):
"""
Args:
log_probs: (batch_size, 7*8)
entropy: (batch_size, 7*8)
            mask: (batch_size, 7*8)
        Returns:
            log_probs_masked: log_probs with masked-out positions zeroed (no gradient flows through the zeros).
            entropy_masked: entropy with masked-out positions zeroed.
"""
mask = mask.to(log_probs.device)
log_probs_masked = torch.where(mask, log_probs, torch.zeros_like(log_probs, requires_grad=False))
entropy_masked = torch.where(mask, entropy, torch.zeros_like(entropy, requires_grad=False))
return log_probs_masked, entropy_masked
def _forward_micro_batch(self, micro_batch, temperature) -> tuple[torch.Tensor, torch.Tensor]:
"""
        Args:
            micro_batch: dict-like batch containing input_ids, attention_mask, pixel_values,
                responses and response_mask.
            temperature: scalar used to scale the logits before computing log-probs.
        Returns:
            entropy: (bs, response_len)
            log_probs: (bs, response_len)
"""
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
input_ids = micro_batch["input_ids"]
attention_mask = micro_batch["attention_mask"]
pixel_values = micro_batch["pixel_values"]
responses = micro_batch["responses"]
input_ids_unpad, _ = self.process_tensor(input_ids, self.pad_token_id)
attention_mask_unpad, _ = self.process_tensor(attention_mask, 0)
logits = self.actor_module(
input_ids=input_ids_unpad,
attention_mask=attention_mask_unpad,
pixel_values=pixel_values,
            )  # prevent the model from thinking we are generating
assert self.actor_module.vocab_size == 32000
start_index = self.actor_module.vocab_size - 256
logits = logits[..., -256 - 64 : -64] # Shape: [batch_size, seq_len, 256]
responses = responses - start_index
# assert (0<=responses<=255).all()
logits = logits.div(temperature)
log_probs = logprobs_from_logits(logits, responses.to(logits.device))
entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length)
# assert len(log_probs.shape) == 2 and len(entropy.shape) == 2
# TODO(caiyunke.astra): check here
mask = micro_batch["response_mask"]
log_probs, entropy = self.apply_mask_with_grad_control(log_probs, entropy, mask)
return entropy, log_probs
def _forward_micro_batch_update(
self, input_ids, attention_mask, pixel_values, responses, temperature
) -> tuple[torch.Tensor, torch.Tensor]:
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
input_ids_unpad, _ = self.process_tensor(input_ids, self.pad_token_id)
attention_mask_unpad, _ = self.process_tensor(attention_mask, 0)
logits = self.actor_module(
input_ids=input_ids_unpad,
attention_mask=attention_mask_unpad,
pixel_values=pixel_values,
)
assert logits.requires_grad
assert self.actor_module.vocab_size == 32000
start_index = self.actor_module.vocab_size - 256
logits = logits[..., -256 - 64 : -64] # Shape: [batch_size, seq_len, 256]
responses = responses - start_index
logits = logits.div(temperature)
log_probs = logprobs_from_logits(logits, responses)
entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length)
return entropy, log_probs
def _optimizer_step(self):
assert self.config.grad_clip is not None
if isinstance(self.actor_module, FSDP):
grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
self.actor_optimizer.step()
return grad_norm
    def compute_log_prob(self, data: DataProto, calculate_entropy=False) -> dict:
        """Compute the log probability of the responses given input_ids, attention_mask and pixel_values.
        Args:
            data (DataProto): a DataProto containing keys
                ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the
                concatenation of prompt and response, so ``sequence_length = prompt_length + response_length``.
                ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.
                ``pixel_values``: image features for the vision backbone.
                ``responses``: tensor of shape [batch_size, response_length]. torch.int64.
        Returns:
            dict: ``{"log_probs": torch.Tensor, "entropys": Optional[torch.Tensor]}``
        """
self.actor_module.eval()
        micro_batch_size = data.meta_info["micro_batch_size"]
        # temperature must be in data.meta_info to avoid silent errors
        temperature = data.meta_info["temperature"]
        use_dynamic_bsz = data.meta_info["use_dynamic_bsz"]
        self.pad_token_id = data.meta_info["pad_token_id"]
        if use_dynamic_bsz:
            # read max_token_len before `data` is narrowed down to its TensorDict batch below
            max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size
        select_keys = ["responses", "input_ids", "attention_mask", "pixel_values", "response_mask"]
        data = data.select(batch_keys=select_keys).batch
        if use_dynamic_bsz:
            micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len)
else:
micro_batches = data.split(micro_batch_size)
log_probs_lst = []
entropy_lst = []
for micro_batch in micro_batches:
with torch.no_grad():
entropy, log_probs = self._forward_micro_batch(micro_batch, temperature=temperature)
log_probs_lst.append(log_probs)
if calculate_entropy:
entropy_lst.append(entropy)
log_probs = torch.concat(log_probs_lst, dim=0)
entropys = None
if calculate_entropy:
entropys = torch.concat(entropy_lst, dim=0)
if use_dynamic_bsz:
log_probs = restore_dynamic_batch(log_probs, batch_idx_list)
if calculate_entropy:
entropys = restore_dynamic_batch(entropys, batch_idx_list)
return {"log_probs": log_probs, "entropys": entropys}
def update_policy(self, data: DataProto):
self.actor_module.train()
assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0
self.gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
        temperature = data.meta_info["temperature"]  # temperature must be in data.meta_info to avoid silent errors
select_keys = [
"responses",
"response_mask",
"input_ids",
"attention_mask",
"pixel_values",
"old_log_probs",
"advantages",
]
batch = data.select(batch_keys=select_keys).batch
self.pad_token_id = data.meta_info["pad_token_id"]
# TODO(caiyunke.astra): check here
# assert self.config.ppo_micro_batch_size_per_gpu == 1
# Split to make minibatch iterator for updating the actor
# See PPO paper for details. https://arxiv.org/abs/1707.06347
mini_batches = batch.split(self.config.ppo_mini_batch_size)
metrics = {}
for batch_idx, mini_batch in enumerate(mini_batches):
if self.config.use_dynamic_bsz:
max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len)
else:
self.gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)
self.actor_optimizer.zero_grad()
            for micro_batch in micro_batches:
micro_batch = micro_batch.to(get_device_id()) # actor device is cpu when using offload
responses = micro_batch["responses"]
response_mask = micro_batch["response_mask"] # (batch_size, traj_len)
old_log_prob = micro_batch["old_log_probs"]
advantages = micro_batch["advantages"]
# clip_ratio = self.config.clip_ratio
clip_ratio_high = self.config.clip_ratio_high
clip_ratio_low = self.config.clip_ratio_low
input_ids = micro_batch["input_ids"]
attention_mask = micro_batch["attention_mask"]
pixel_values = micro_batch["pixel_values"]
responses = micro_batch["responses"]
loss_info = {
"actor/pg_loss": 0,
"actor/pg_clipfrac": 0,
"actor/ppo_kl": 0,
"actor/pg_clipfrac_lower": 0,
}
_, log_prob = self._forward_micro_batch_update(
input_ids=input_ids,
attention_mask=attention_mask,
pixel_values=pixel_values,
responses=responses,
temperature=temperature,
)
pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = core_algos.compute_policy_loss(
old_log_prob=old_log_prob,
log_prob=log_prob,
advantages=advantages,
response_mask=response_mask,
cliprange_high=clip_ratio_high,
cliprange_low=clip_ratio_low,
)
loss = pg_loss / self.gradient_accumulation
loss.backward()
loss_info["actor/pg_loss"] = loss_info["actor/pg_loss"] + pg_loss.detach().item()
loss_info["actor/pg_clipfrac"] = loss_info["actor/pg_clipfrac"] + pg_clipfrac.detach().item()
loss_info["actor/ppo_kl"] = loss_info["actor/ppo_kl"] + ppo_kl.detach().item()
loss_info["actor/pg_clipfrac_lower"] = (
loss_info["actor/pg_clipfrac_lower"] + pg_clipfrac_lower.detach().item()
)
append_to_dict(metrics, loss_info)
grad_norm = self._optimizer_step()
mini_batch_metrics = {"actor/grad_norm": grad_norm.detach().item()}
append_to_dict(metrics, mini_batch_metrics)
self.actor_optimizer.zero_grad()
return metrics
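# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal, self-
# contained illustration of the trajectory-masking rule used by
# generate_traj_mask / apply_mask_with_grad_control above. All values are
# made up for the demo; the real actor additionally needs a config and an
# FSDP-wrapped nn.Module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    end_step = torch.tensor([2, 4])  # per-env number of valid steps
    traj_len = 5
    steps = torch.arange(traj_len).unsqueeze(0).expand(end_step.size(0), -1)
    mask = steps < end_step.unsqueeze(1)  # same rule as generate_traj_mask
    log_probs = torch.randn(2, traj_len)
    masked = torch.where(mask, log_probs, torch.zeros_like(log_probs))
    print(mask)    # rows: [T, T, F, F, F] and [T, T, T, T, F]
    print(masked)  # log-probs zeroed where the mask is False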
|
verl__experimental__vla__dp_rob.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import numpy as np
import torch
from omegaconf import DictConfig
from verl import DataProto
from verl.single_controller.ray import RayWorkerGroup
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class EnvLoop:
"""An env loop manages interactions between models and vectorized environments. It's designed for computationally
intensive environments, such as robotics simulators."""
def __init__(self, env_wg: RayWorkerGroup, rollout_wg: RayWorkerGroup, config: DictConfig):
"""
Initialize the EnvLoop.
Args:
env_wg (RayWorkerGroup): Environment worker group.
rollout_wg (RayWorkerGroup): Rollout worker group for model inference.
config (DictConfig): YAML config.
"""
self.env_wg = env_wg
self.rollout_wg = rollout_wg
self.config = config
# Extract relevant configuration
self.max_interactions = config.env.train.max_episode_steps // config.env.actor.model.num_action_chunks
self.stage_num = config.env.rollout.pipeline_stage_num
self.num_envs_per_worker = config.env.train.num_envs
self.action_dim = config.env.actor.model.action_dim
self.num_action_chunks = config.env.actor.model.num_action_chunks
# Derived properties
self.total_envs = self.env_wg.world_size * self.num_envs_per_worker
if self.total_envs % self.stage_num != 0:
raise ValueError(f"Total envs ({self.total_envs}) must be divisible by stage_num ({self.stage_num})")
self.envs_per_stage = self.total_envs // self.stage_num
self.env_wg.init_worker()
self.env_wg.init_simulator()
def generate_sequences(self, prompts: DataProto, reset_future: asyncio.Future) -> DataProto:
"""Split input batch and dispatch to env loop workers.
Args:
prompts (DataProto): Input batch.
Returns:
DataProto: Output batch.
"""
reset_results = reset_future.get()
loop = asyncio.get_event_loop()
self.rollout_wg.switch_to_rollout()
output = loop.run_until_complete(self.run(prompts, reset_results))
self.rollout_wg.switch_to_train()
# TODO(caiyunke.astra): add timing metrics
return output
async def run(self, prompts: DataProto, reset_results: DataProto) -> DataProto:
"""
Run the environment interaction loop.
This method orchestrates a pipelined process:
1. Resets environments to specified initial states.
2. In a loop, it gets actions from the rollout workers and applies them to the environments.
3. Collects all trajectory data (observations, actions, rewards, dones).
4. Formats and returns the collected trajectories as a single batch.
Args:
prompts (DataProto): Contains initial state IDs and other settings.
- 'non_tensor_batch.state_ids': A numpy array of state IDs to reset envs.
Returns:
DataProto: A batch containing the complete trajectories.
"""
initial_state_ids = prompts.non_tensor_batch["state_ids"]
staged_obs = self._restructure_obs_data(reset_results)
# --- Pipeline state ---
trajectories = {i: [] for i in range(self.stage_num)} # To store (obs, action, rew, done) tuples
rollout_futures = {}
# is_complete = torch.zeros((self.total_envs,), dtype=torch.bool)
for stage_id in range(self.stage_num):
# trajectories[stage_id].append({'obs': staged_obs[stage_id]})
trajectories[stage_id].append({})
vla_input = staged_obs[stage_id]
vla_input.meta_info = prompts.meta_info # Pass along rollout config
rollout_futures[stage_id] = self.rollout_wg.generate_sequences(vla_input)
async def _stage_loop(stage_id: int):
for step_idx in range(self.max_interactions):
action_result: DataProto = await asyncio.to_thread(rollout_futures[stage_id].get)
trajectories[stage_id][-1]["action"] = action_result
action_data = DataProto.from_dict(
non_tensors={"actions": action_result.batch["action"].cpu().numpy()},
meta_info={"stage_id": stage_id},
)
env_ref = self.env_wg.env_interact_step(action_data)
env_result: DataProto = await asyncio.to_thread(env_ref.get)
trajectories[stage_id][-1]["rew"] = env_result.batch["rews"]
trajectories[stage_id][-1]["done"] = env_result.batch["terminations"]
next_obs = DataProto(
batch=env_result.batch.select("full_image", "wrist_image", "state"),
non_tensor_batch={"task_descriptions": env_result.non_tensor_batch["task_descriptions"]},
)
if step_idx < self.max_interactions - 1:
trajectories[stage_id].append({})
vla_input = next_obs
vla_input.meta_info = prompts.meta_info
rollout_futures[stage_id] = self.rollout_wg.generate_sequences(vla_input)
await asyncio.gather(*[asyncio.create_task(_stage_loop(sid)) for sid in range(self.stage_num)])
self.env_wg.finish_rollout()
return self._collate_trajectories(trajectories, initial_state_ids, meta_info=prompts.meta_info)
def _restructure_obs_data(self, data_proto: DataProto) -> list[DataProto]:
"""Reshapes flat observation data from env_wg into a list of per-stage DataProto objects."""
# env_wg returns a flat batch ordered by [worker0_stage0, worker0_stage1, ...,
# worker1_stage0, worker1_stage1, ...]
# First, un-flatten by worker, then by stage
num_workers = self.env_wg.world_size
staged_data = [[] for _ in range(self.stage_num)]
chunks = data_proto.chunk(num_workers)
for worker_chunk in chunks:
stage_chunks = worker_chunk.chunk(self.stage_num)
for stage_id, data in enumerate(stage_chunks):
staged_data[stage_id].append(data)
# Concatenate data from all workers for each stage
return [DataProto.concat(data_list) for data_list in staged_data]
def _collate_trajectories(self, trajectories: dict, initial_state_ids: np.ndarray, meta_info) -> DataProto:
"""
Collates the collected trajectory data into the final batch format.
"""
flat_trajs = [{} for _ in range(len(trajectories[0]))]
for stage_id in range(self.stage_num):
for step_idx, step_data in enumerate(trajectories[stage_id]):
if not flat_trajs[step_idx]: # if dict is empty
flat_trajs[step_idx] = step_data
else:
# Concatenate DataProto objects
for key, value in step_data.items():
if isinstance(value, DataProto):
flat_trajs[step_idx][key] = DataProto.concat([flat_trajs[step_idx][key], value])
elif isinstance(value, torch.Tensor):
flat_trajs[step_idx][key] = torch.cat([flat_trajs[step_idx][key], value], dim=0)
# iterate all action batch keys (e.g., action, images, pixel_values, input_ids, ...)
batch_dict = {}
action_batch_keys = list(flat_trajs[0]["action"].batch.keys())
for key in action_batch_keys:
per_step_values = [step["action"].batch[key] for step in flat_trajs]
batch_dict[key] = torch.stack(per_step_values, dim=1)
batch_dict["complete"] = torch.stack([step["done"] for step in flat_trajs], dim=1).squeeze(-1)
batch_dict["env_state_id"] = torch.from_numpy(initial_state_ids.astype(int))
return DataProto.from_single_dict(batch_dict, meta_info=meta_info)
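# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the [worker, stage]
# interleaving handled by _restructure_obs_data. env_wg returns a flat batch
# ordered worker-major / stage-minor, and it is regrouped stage-major. Plain
# Python lists stand in for DataProto chunks purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    num_workers, stage_num = 2, 2
    flat = ["w0s0", "w0s1", "w1s0", "w1s1"]  # one obs per (worker, stage)
    staged = [[] for _ in range(stage_num)]
    worker_chunks = [flat[w * stage_num : (w + 1) * stage_num] for w in range(num_workers)]
    for worker_chunk in worker_chunks:
        for stage_id, item in enumerate(worker_chunk):
            staged[stage_id].append(item)
    print(staged)  # [['w0s0', 'w1s0'], ['w0s1', 'w1s1']]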
|
verl__experimental__vla__env_loop.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from io import BytesIO
from typing import Any, Optional
import imageio
import numpy as np
import torch
import torchvision.transforms.functional as F
from PIL import Image, ImageDraw, ImageFont
def prepare_actions_simplevla(
raw_chunk_actions,
) -> torch.Tensor:
from verl.experimental.vla.envs.libero_env.utils import invert_gripper_action, normalize_gripper_action
normalized_action = normalize_gripper_action(raw_chunk_actions, binarize=True)
inverted_action = invert_gripper_action(normalized_action)
return inverted_action
def prepare_actions(
simulator_type,
raw_chunk_actions,
num_action_chunks,
action_dim,
action_scale: float = 1.0,
policy: str = "widowx_bridge",
) -> torch.Tensor:
# TODO: prepare_actions according to simulator_type
chunk_actions = prepare_actions_simplevla(
raw_chunk_actions=raw_chunk_actions,
)
return chunk_actions
def to_tensor(array: dict | torch.Tensor | np.ndarray | list | Any, device: str = "cpu") -> dict | torch.Tensor:
"""
Copied from ManiSkill!
Maps any given sequence to a torch tensor on the CPU/GPU. If physx gpu
is not enabled then we use CPU, otherwise GPU, unless specified
by the device argument
Args:
array: The data to map to a tensor
device: The device to put the tensor on. By default this is None
and to_tensor will put the device on the GPU if physx is enabled
and CPU otherwise
"""
if isinstance(array, (dict)):
return {k: to_tensor(v, device=device) for k, v in array.items()}
elif isinstance(array, torch.Tensor):
ret = array.to(device)
elif isinstance(array, np.ndarray):
if array.dtype == np.uint16:
array = array.astype(np.int32)
elif array.dtype == np.uint32:
array = array.astype(np.int64)
ret = torch.tensor(array).to(device)
else:
if isinstance(array, list) and isinstance(array[0], np.ndarray):
array = np.array(array)
ret = torch.tensor(array, device=device)
if ret.dtype == torch.float64:
ret = ret.to(torch.float32)
return ret
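def _demo_to_tensor() -> None:
    """Illustrative sketch (not part of the original module): how to_tensor
    recursively converts a nested dict of numpy arrays and lists to torch tensors."""
    sample = {"image": np.zeros((2, 3), dtype=np.uint16), "reward": [0.0, 1.0]}
    out = to_tensor(sample, device="cpu")
    # uint16 arrays are upcast to int32 before conversion; python floats become float32
    print(out["image"].dtype, out["reward"].dtype)  # torch.int32 torch.float32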
def tile_images(images: list[np.ndarray | torch.Tensor], nrows: int = 1) -> np.ndarray | torch.Tensor:
"""
Copied from maniskill https://github.com/haosulab/ManiSkill
Tile multiple images to a single image comprised of nrows and an
appropriate number of columns to fit all the images.
    The images can also be batched (e.g. of shape (B, H, W, C)), but the
    given images must all have the same batch size.
    If nrows is 1, images can be of different sizes. If nrows > 1,
    they must all be the same size.
"""
# Sort images in descending order of vertical height
batched = False
if len(images[0].shape) == 4:
batched = True
if nrows == 1:
images = sorted(images, key=lambda x: x.shape[0 + batched], reverse=True)
columns: list[list[np.ndarray | torch.Tensor]] = []
if batched:
max_h = images[0].shape[1] * nrows
cur_h = 0
cur_w = images[0].shape[2]
else:
max_h = images[0].shape[0] * nrows
cur_h = 0
cur_w = images[0].shape[1]
# Arrange images in columns from left to right
column = []
for im in images:
if cur_h + im.shape[0 + batched] <= max_h and cur_w == im.shape[1 + batched]:
column.append(im)
cur_h += im.shape[0 + batched]
else:
columns.append(column)
column = [im]
cur_h, cur_w = im.shape[0 + batched : 2 + batched]
columns.append(column)
# Tile columns
total_width = sum(x[0].shape[1 + batched] for x in columns)
is_torch = False
if torch is not None:
is_torch = isinstance(images[0], torch.Tensor)
output_shape = (max_h, total_width, 3)
if batched:
output_shape = (images[0].shape[0], max_h, total_width, 3)
if is_torch:
output_image = torch.zeros(output_shape, dtype=images[0].dtype)
else:
output_image = np.zeros(output_shape, dtype=images[0].dtype)
cur_x = 0
for column in columns:
cur_w = column[0].shape[1 + batched]
next_x = cur_x + cur_w
if is_torch:
column_image = torch.concatenate(column, dim=0 + batched)
else:
column_image = np.concatenate(column, axis=0 + batched)
cur_h = column_image.shape[0 + batched]
output_image[..., :cur_h, cur_x:next_x, :] = column_image
cur_x = next_x
return output_image
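def _demo_tile_images() -> None:
    """Illustrative sketch (not part of the original module): tiling two
    same-sized frames into a single row with tile_images."""
    frames = [np.full((4, 4, 3), value, dtype=np.uint8) for value in (0, 255)]
    grid = tile_images(frames, nrows=1)
    print(grid.shape)  # (4, 8, 3): the two 4x4 tiles sit side by side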
def put_text_on_image(image: np.ndarray, lines: list[str], max_width: int = 200) -> np.ndarray:
"""
Put text lines on an image with automatic line wrapping.
Args:
image: Input image as numpy array
lines: List of text lines to add
max_width: Maximum width for text wrapping
"""
assert image.dtype == np.uint8, image.dtype
image = image.copy()
image = Image.fromarray(image)
draw = ImageDraw.Draw(image)
font = ImageFont.load_default(size=20)
new_lines = []
for line in lines:
words = line.split()
current_line = []
for word in words:
test_line = " ".join(current_line + [word])
test_width = font.getlength(test_line)
if test_width <= max_width:
current_line.append(word)
else:
new_lines.append(" ".join(current_line))
current_line = [word]
if current_line:
new_lines.append(" ".join(current_line))
y = -10
for line in new_lines:
bbox = draw.textbbox((0, 0), text=line)
textheight = bbox[3] - bbox[1]
y += textheight + 10
x = 10
draw.text((x, y), text=line, fill=(0, 0, 0))
return np.array(image)
def put_info_on_image(
image: np.ndarray,
info: dict[str, float],
extras: Optional[list[str]] = None,
overlay: bool = True,
) -> np.ndarray:
"""
Put information dictionary and extra lines on an image.
Args:
image: Input image
info: Dictionary of key-value pairs to display
extras: Additional text lines to display
overlay: Whether to overlay text on image
"""
lines = [f"{k}: {v:.3f}" if isinstance(v, float) else f"{k}: {v}" for k, v in info.items()]
if extras is not None:
lines.extend(extras)
return put_text_on_image(image, lines)
def list_of_dict_to_dict_of_list(
list_of_dict: list[dict[str, Any]],
) -> dict[str, list[Any]]:
"""
Convert a list of dictionaries to a dictionary of lists.
Args:
list_of_dict: List of dictionaries with same keys
Returns:
Dictionary where each key maps to a list of values
"""
if len(list_of_dict) == 0:
return {}
keys = list_of_dict[0].keys()
output = {key: [] for key in keys}
for data in list_of_dict:
for key, item in data.items():
assert key in output
output[key].append(item)
return output
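def _demo_list_of_dict_to_dict_of_list() -> None:
    """Illustrative sketch (not part of the original module): converting
    per-env info dicts into a dict of per-key lists."""
    infos = [{"success": True, "reward": 0.0}, {"success": False, "reward": 1.0}]
    print(list_of_dict_to_dict_of_list(infos))
    # {'success': [True, False], 'reward': [0.0, 1.0]}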
def save_rollout_video(rollout_images: list[np.ndarray], output_dir: str, video_name: str, fps: int = 30) -> None:
"""
Saves an MP4 replay of an episode.
Args:
rollout_images: List of images from the episode
output_dir: Directory to save the video
video_name: Name of the output video file
fps: Frames per second for the video
"""
os.makedirs(output_dir, exist_ok=True)
mp4_path = os.path.join(output_dir, f"{video_name}.mp4")
video_writer = imageio.get_writer(mp4_path, fps=fps)
for img in rollout_images:
video_writer.append_data(img)
video_writer.close()
def resize_image(img: np.ndarray, resize_size: tuple[int, int]) -> np.ndarray:
"""
Takes numpy array corresponding to a single image and returns resized image as numpy array.
Args:
img: Input image as numpy array
resize_size: Target size for resizing
Returns:
Resized image as numpy array
"""
assert isinstance(resize_size, tuple), "resize_size must be a tuple"
assert isinstance(img, np.ndarray), "img must be a numpy array"
# Convert numpy array to PIL Image
pil_img = Image.fromarray(img)
# Encode as JPEG, as done in RLDS dataset builder
buffer = BytesIO()
pil_img.save(buffer, format="JPEG")
buffer.seek(0)
# Immediately decode back
img = Image.open(buffer)
img = img.resize(resize_size, Image.Resampling.LANCZOS)
img = np.array(img)
img = np.clip(np.round(img), 0, 255).astype(np.uint8)
return img
def center_crop_image(image: Image.Image) -> Image.Image:
crop_scale = 0.9
orig_w, orig_h = image.size
image_tensor = F.to_tensor(image)
crop_h = int(orig_h * crop_scale)
crop_w = int(orig_w * crop_scale)
image_tensor = F.center_crop(image_tensor, (crop_h, crop_w))
image_tensor = F.resize(image_tensor, (orig_h, orig_w))
final_image = F.to_pil_image(image_tensor)
final_image = final_image.convert("RGB")
return final_image
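# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the JPEG round-trip
# resize and the 0.9 center crop applied to observation images. The random
# image below is purely illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dummy = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    resized = resize_image(dummy, (32, 32))
    cropped = center_crop_image(Image.fromarray(dummy))
    print(resized.shape, cropped.size)  # (32, 32, 3) (64, 64)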
|
verl__experimental__vla__envs__action_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Optional
import gymnasium as gym
import numpy as np
import omni
import torch
from verl.experimental.vla.envs.action_utils import (
put_info_on_image,
save_rollout_video,
tile_images,
to_tensor,
)
logger = logging.getLogger(__name__)
class IsaacEnv(gym.Env):
def __init__(self, cfg, rank, world_size):
self.rank = rank
self.cfg = cfg
self.world_size = world_size
self.seed = self.cfg.seed + rank
self.num_envs = self.cfg.num_envs
self.action_dim = self.cfg.get("action_dim", 7)
self.device = self.cfg.get("device", "cuda:0")
self._generator = np.random.default_rng(seed=self.seed)
self.task_suite_name = self.cfg.task_suite_name
self.env = None
self.prev_step_reward = np.zeros(self.num_envs)
self.use_rel_reward = False
self._init_metrics()
self._elapsed_steps = np.zeros(self.num_envs, dtype=np.int32)
self.max_episode_steps = cfg.max_episode_steps
self.video_cfg = cfg.video_cfg
self.render_images = []
self.video_cnt = 0
self.camera_name = cfg.init_params.camera_names
        # environment variables must be set before importing isaaclab
from isaaclab.app import AppLauncher
launch_args = {"headless": True, "enable_cameras": True}
app_launcher = AppLauncher(**launch_args)
self.app = app_launcher.app
# force franka registration
import isaaclab_playground.tasks.manipulation.libero.config.franka # noqa
def _init_env(self, task_id=0):
"""Initializes the Isaac Sim environment."""
self.task_name = self.cfg.get("task_name")
self.task_id = task_id
        # FIXME: since Isaac uses environment variables to set the task id, all envs have to use the same task id
if self.task_suite_name.startswith("libero"):
os.environ["LIBERO_TASK_SUITE"] = self.task_suite_name
os.environ["LIBERO_TASK_ID"] = str(task_id)
os.environ["LIBERO_OSC_TYPE"] = "pose_rel"
if not self.task_name:
self.task_name = "Isaac-Libero-Franka-OscPose-v0"
from isaaclab_tasks.utils import parse_env_cfg
self.env_cfg = parse_env_cfg(self.task_name, num_envs=self.num_envs)
self.env_cfg.env_name = self.cfg.get("env_name", str(self.task_id))
self.env_cfg.sim.device = self.device
self.env_cfg.sim.physx.enable_ccd = True
self.env_cfg.terminations.time_out = None
self.env_cfg.observations.policy.concatenate_terms = False
# create environment from loaded config
if self.env:
self.env.close()
omni.usd.get_context().new_stage()
self.env = gym.make(self.task_name, cfg=self.env_cfg).unwrapped
if self.cfg.video_cfg.save_video:
video_dir = os.path.join(self.cfg.video_cfg.video_base_dir, f"rank_{self.rank}")
os.makedirs(video_dir, exist_ok=True)
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
# TODO support other task suite
if self.task_suite_name.startswith("libero"):
self.task_descriptions = self.env.cfg.libero_config.task_info["language_instruction"]
assert self.env_cfg.osc_type == "pose_rel", (
f"Only pose_rel osc type is supported for libero. Received: {self.env_cfg.osc_type}"
)
else:
raise ValueError(f"Task suite {self.task_suite_name} is not supported.")
logger.info("Isaac Sim environment initialized")
def _init_metrics(self):
self.success_once = np.zeros(self.num_envs, dtype=bool)
self.returns = np.zeros(self.num_envs)
def _reset_metrics(self, env_idx=None):
if env_idx is not None:
mask = np.zeros(self.num_envs, dtype=bool)
mask[env_idx] = True
self.prev_step_reward[mask] = 0.0
self.success_once[mask] = False
self.returns[mask] = 0
self._elapsed_steps[env_idx] = 0
else:
self.prev_step_reward[:] = 0
self.success_once[:] = False
self.returns[:] = 0.0
self._elapsed_steps[:] = 0
def _record_metrics(self, step_reward, terminations, infos):
episode_info = {}
self.returns += step_reward
# Ensure terminations is a numpy array before the bitwise OR
if isinstance(terminations, torch.Tensor):
terminations = terminations.cpu().numpy()
self.success_once = self.success_once | terminations
episode_info["success_once"] = self.success_once.copy()
episode_info["return"] = self.returns.copy()
episode_info["episode_len"] = self.elapsed_steps.copy()
if any(self.elapsed_steps > 0):
episode_info["reward"] = episode_info["return"] / self.elapsed_steps
else:
episode_info["reward"] = 0
infos["episode"] = to_tensor(episode_info)
return infos
def reset(self, env_idx: Optional[int | list[int] | np.ndarray] = None, options: Optional[dict] = None):
if env_idx is None:
env_idx = np.arange(self.num_envs)
raw_obs, infos = self.env.reset()
obs = self._wrap_obs(raw_obs)
self._reset_metrics(env_idx)
return obs, infos
def step(self, actions=None):
if actions is None:
            # Isaac envs are initialized via reset_envs_to_state_ids; a None action is a no-op
return (None, None, None, None, None)
        truncations = self._elapsed_steps >= self.max_episode_steps
# _actions = torch.zeros(self.action_space.shape)
if isinstance(actions, np.ndarray):
actions = torch.from_numpy(actions)
self._elapsed_steps += 1
raw_obs, _reward, terminations, _, infos = self.env.step(actions)
self.last_obs = raw_obs
self.last_infos = infos
obs = self._wrap_obs(raw_obs)
step_reward = self._calc_step_reward(_reward.cpu().numpy())
if self.video_cfg.save_video:
plot_infos = {
"rewards": step_reward,
"terminations": terminations,
"task": self.task_descriptions,
}
self.add_new_frames(obs, plot_infos)
infos = self._record_metrics(step_reward, terminations, infos)
return (
obs,
to_tensor(step_reward),
to_tensor(terminations),
to_tensor(truncations),
infos,
)
def chunk_step(self, chunk_actions):
# chunk_actions: [num_envs, chunk_step, action_dim]
chunk_size = chunk_actions.shape[1]
chunk_rewards = []
raw_chunk_terminations = []
raw_chunk_truncations = []
for i in range(chunk_size):
actions = chunk_actions[:, i]
extracted_obs, step_reward, terminations, truncations, infos = self.step(actions)
chunk_rewards.append(step_reward)
raw_chunk_terminations.append(terminations)
raw_chunk_truncations.append(truncations)
chunk_rewards = torch.stack(chunk_rewards, dim=1) # [num_envs, chunk_steps]
raw_chunk_terminations = torch.stack(raw_chunk_terminations, dim=1) # [num_envs, chunk_steps]
raw_chunk_truncations = torch.stack(raw_chunk_truncations, dim=1) # [num_envs, chunk_steps]
chunk_terminations = raw_chunk_terminations.clone()
chunk_truncations = raw_chunk_truncations.clone()
return (
extracted_obs,
chunk_rewards,
chunk_terminations,
chunk_truncations,
infos,
)
def _calc_step_reward(self, reward):
if self.use_rel_reward:
reward_diff = reward - self.prev_step_reward
self.prev_step_reward = reward
return reward_diff
else:
return reward
def _wrap_obs(self, raw_obs):
images_and_states = self._extract_image_and_state(raw_obs)
obs = {
"images_and_states": to_tensor(images_and_states),
"task_descriptions": [self.task_descriptions] * self.num_envs,
}
return obs
def _extract_image_and_state(self, obs):
        # TODO: support multiple cameras
        camera_name = self.camera_name[0]
        cam = None
        for key in self.env.unwrapped.scene.keys():
            if key.startswith(camera_name):
                cam = self.env.unwrapped.scene[key]
                break
        assert cam is not None, f"camera {camera_name} not found in scene"
rgb = cam.data.output["rgb"]
full_image = rgb.cpu().numpy()
return {
"full_image": full_image,
"state": np.concatenate(
[
obs["policy"]["eef_pose"].cpu(),
# quat2axisangle(obs["robot0_eef_quat"]), # isaac do not return robot0_eef_quat
# obs["policy"]["gripper_pos"].cpu(),
],
axis=-1,
),
}
def add_new_frames(self, obs, plot_infos):
images = []
for env_id, img in enumerate(obs["images_and_states"]["full_image"]):
info_item = {k: v if np.size(v) == 1 else v[env_id] for k, v in plot_infos.items()}
img = put_info_on_image(img.cpu().numpy(), info_item)
images.append(img)
full_image = tile_images(images, nrows=int(np.sqrt(self.num_envs)))
self.render_images.append(full_image)
def flush_video(self, video_sub_dir: Optional[str] = None):
output_dir = os.path.join(self.video_cfg.video_base_dir, f"rank_{self.rank}")
if video_sub_dir is not None:
output_dir = os.path.join(output_dir, f"{video_sub_dir}")
save_rollout_video(
self.render_images,
output_dir=output_dir,
video_name=f"{self.video_cnt}",
)
self.video_cnt += 1
self.render_images = []
def close(self):
if self.env is not None:
self.env.close()
self.app.close()
def load_state(self, state_buffer: bytes):
self.env.load_state(state_buffer)
def get_state(self):
return None
def reset_envs_to_state_ids(self, state_ids_list, task_ids_list):
logger.info(f"IsaacEnv reset_envs_to_state_ids task_ids_list: {task_ids_list}")
assert len(set(task_ids_list)) == 1, "Isaac env only support single task"
self._init_env(task_ids_list[0])
        # In Isaac, reset to random states in groups to get more test coverage
# TODO support reset in group with options = {"group": len(set(state_ids_list))}
raw_obs, infos = self.env.reset()
env_idx = np.arange(self.num_envs)
self._reset_metrics(env_idx)
        self._elapsed_steps = np.zeros(self.num_envs, dtype=np.int32)
        # stabilize the environment by stepping with zero actions for a few frames
for _ in range(10):
zero_actions = torch.zeros((self.num_envs, self.action_dim), device=self.device)
raw_obs, _, _, _, infos = self.env.step(zero_actions)
obs = self._wrap_obs(raw_obs)
return obs, infos
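# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the relative-reward option
# in _calc_step_reward. With use_rel_reward enabled, each step only receives
# the increase over the previously observed reward. Values are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    prev_step_reward = np.zeros(2)
    for raw_reward in (np.array([0.0, 0.5]), np.array([1.0, 0.5])):
        reward_diff = raw_reward - prev_step_reward
        prev_step_reward = raw_reward
        print(reward_diff)  # first [0. 0.5], then [1. 0.]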
|
verl__experimental__vla__envs__isaac_env__isaac_env.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Optional
import gymnasium as gym
import numpy as np
import torch
from libero.libero import get_libero_path
from libero.libero.benchmark import Benchmark, get_benchmark
from libero.libero.envs import OffScreenRenderEnv
from omegaconf.omegaconf import OmegaConf
from verl.experimental.vla.envs.action_utils import (
list_of_dict_to_dict_of_list,
put_info_on_image,
save_rollout_video,
tile_images,
to_tensor,
)
from verl.experimental.vla.envs.libero_env.utils import get_libero_image, get_libero_wrist_image, quat2axisangle
from verl.experimental.vla.envs.libero_env.venv import ReconfigureSubprocEnv
logger = logging.getLogger(__name__)
def patched_get_task_init_states(self, i):
init_states_path = os.path.join(
get_libero_path("init_states"),
self.tasks[i].problem_folder,
self.tasks[i].init_states_file,
)
init_states = torch.load(init_states_path, weights_only=False)
return init_states
Benchmark.get_task_init_states = patched_get_task_init_states
class LiberoEnv(gym.Env):
def __init__(self, cfg, rank, world_size):
self.rank = rank
self.cfg = cfg
self.world_size = world_size
self.seed = self.cfg.seed + rank
self.num_envs = self.cfg.num_envs
self.ignore_terminations = False
self._generator = np.random.default_rng(seed=self.seed)
self._generator_ordered = np.random.default_rng(seed=0)
self.start_idx = 0
self.task_suite: Benchmark = get_benchmark(cfg.task_suite_name)()
self._compute_total_num_group_envs()
self.reset_state_ids_all = self.get_reset_state_ids_all()
self.reset_state_ids = self._get_ordered_reset_state_ids(self.num_envs)
self._init_task_and_trial_ids()
self._init_env()
self.prev_step_reward = np.zeros(self.num_envs)
self.use_rel_reward = False
self._init_metrics()
self._elapsed_steps = np.zeros(self.num_envs, dtype=np.int32)
self.video_cfg = cfg.video_cfg
self.video_cnt = 0
self.render_images = []
@property
def elapsed_steps(self):
return self._elapsed_steps
def get_all_state_ids(self):
"""Returns all possible state IDs from the entire benchmark."""
return np.arange(self.total_num_group_envs) # (total_num_states,)
def _init_env(self):
env_fns = self.get_env_fns()
self.env = ReconfigureSubprocEnv(env_fns)
def get_env_fns(self):
env_fn_params = self.get_env_fn_params()
env_fns = []
for env_fn_param in env_fn_params:
def env_fn(param=env_fn_param):
seed = param.pop("seed")
env = OffScreenRenderEnv(**param)
env.seed(seed)
return env
env_fns.append(env_fn)
return env_fns
def get_env_fn_params(self, env_idx=None):
env_fn_params = []
base_env_args = OmegaConf.to_container(self.cfg.init_params, resolve=True)
task_descriptions = []
if env_idx is None:
env_idx = np.arange(self.cfg.num_envs)
for env_id in range(self.cfg.num_envs):
if env_id not in env_idx:
task_descriptions.append(self.task_descriptions[env_id])
continue
task = self.task_suite.get_task(self.task_ids[env_id])
task_bddl_file = os.path.join(get_libero_path("bddl_files"), task.problem_folder, task.bddl_file)
env_fn_params.append(
{
**base_env_args,
"bddl_file_name": task_bddl_file,
"seed": self.seed,
}
)
task_descriptions.append(task.language)
self.task_descriptions = task_descriptions
return env_fn_params
def _compute_total_num_group_envs(self):
self.total_num_group_envs = 0
self.trial_id_bins = []
for task_id in range(self.task_suite.get_num_tasks()):
task_num_trials = len(self.task_suite.get_task_init_states(task_id))
self.trial_id_bins.append(task_num_trials)
self.total_num_group_envs += task_num_trials
self.cumsum_trial_id_bins = np.cumsum(self.trial_id_bins)
def _init_task_and_trial_ids(self):
self.task_ids, self.trial_ids = self._get_task_and_trial_ids_from_reset_state_ids(self.reset_state_ids)
def _get_random_reset_state_ids(self, num_reset_states):
reset_state_ids = self._generator.integers(low=0, high=self.total_num_group_envs, size=(num_reset_states,))
return reset_state_ids
def get_reset_state_ids_all(self):
reset_state_ids = np.arange(self.total_num_group_envs)
valid_size = len(reset_state_ids) - (len(reset_state_ids) % self.world_size)
if not self.cfg.only_eval:
self._generator_ordered.shuffle(reset_state_ids)
reset_state_ids = reset_state_ids[:valid_size]
reset_state_ids = reset_state_ids.reshape(self.world_size, -1)
return reset_state_ids
def _get_ordered_reset_state_ids(self, num_reset_states):
reset_state_ids = self.reset_state_ids_all[self.rank][self.start_idx : self.start_idx + num_reset_states]
self.start_idx = self.start_idx + num_reset_states
if self.start_idx >= len(self.reset_state_ids_all[0]):
self.reset_state_ids_all = self.get_reset_state_ids_all()
self.start_idx = 0
return reset_state_ids
def _get_task_and_trial_ids_from_reset_state_ids(self, reset_state_ids):
task_ids = []
trial_ids = []
# get task id and trial id from reset state ids
for reset_state_id in reset_state_ids:
start_pivot = 0
for task_id, end_pivot in enumerate(self.cumsum_trial_id_bins):
if reset_state_id < end_pivot and reset_state_id >= start_pivot:
task_ids.append(task_id)
trial_ids.append(reset_state_id - start_pivot)
break
start_pivot = end_pivot
        logger.debug(
            "get task and trial id: cumsum_trial_id_bins=%s reset_state_ids=%s task_ids=%s trial_ids=%s",
            self.cumsum_trial_id_bins,
            reset_state_ids,
            task_ids,
            trial_ids,
        )
return np.array(task_ids), np.array(trial_ids)
def _get_reset_states(self, env_idx):
if env_idx is None:
env_idx = np.arange(self.num_envs)
init_state = [
self.task_suite.get_task_init_states(self.task_ids[env_id])[self.trial_ids[env_id]] for env_id in env_idx
]
return init_state
def _init_metrics(self):
self.success_once = np.zeros(self.num_envs, dtype=bool)
self.fail_once = np.zeros(self.num_envs, dtype=bool)
self.returns = np.zeros(self.num_envs)
def _reset_metrics(self, env_idx=None):
if env_idx is not None:
mask = np.zeros(self.num_envs, dtype=bool)
mask[env_idx] = True
self.prev_step_reward[mask] = 0.0
self.success_once[mask] = False
self.fail_once[mask] = False
self.returns[mask] = 0
self._elapsed_steps[env_idx] = 0
else:
self.prev_step_reward[:] = 0
self.success_once[:] = False
self.fail_once[:] = False
self.returns[:] = 0.0
self._elapsed_steps[:] = 0
def _record_metrics(self, step_reward, terminations, infos):
episode_info = {}
self.returns += step_reward
self.success_once = self.success_once | terminations
episode_info["success_once"] = self.success_once.copy()
episode_info["return"] = self.returns.copy()
episode_info["episode_len"] = self.elapsed_steps.copy()
episode_info["reward"] = episode_info["return"] / episode_info["episode_len"]
infos["episode"] = to_tensor(episode_info)
return infos
def _extract_image_and_state(self, obs):
return {
"full_image": get_libero_image(obs),
"wrist_image": get_libero_wrist_image(obs),
"state": np.concatenate(
[
obs["robot0_eef_pos"],
quat2axisangle(obs["robot0_eef_quat"]),
obs["robot0_gripper_qpos"],
]
),
}
def _wrap_obs(self, obs_list):
images_and_states_list = []
for obs in obs_list:
images_and_states = self._extract_image_and_state(obs)
images_and_states_list.append(images_and_states)
obs = {
"images_and_states": to_tensor(list_of_dict_to_dict_of_list(images_and_states_list)),
"task_descriptions": self.task_descriptions,
}
return obs
def _reconfigure(self, reset_state_ids, env_idx):
reconfig_env_idx = []
task_ids, trial_ids = self._get_task_and_trial_ids_from_reset_state_ids(reset_state_ids)
for j, env_id in enumerate(env_idx):
if self.task_ids[env_id] != task_ids[j]:
reconfig_env_idx.append(env_id)
self.task_ids[env_id] = task_ids[j]
self.trial_ids[env_id] = trial_ids[j]
if reconfig_env_idx:
env_fn_params = self.get_env_fn_params(reconfig_env_idx)
self.env.reconfigure_env_fns(env_fn_params, reconfig_env_idx)
self.env.seed([0] * len(env_idx))
self.env.reset(id=env_idx)
init_state = self._get_reset_states(env_idx=env_idx)
self.env.set_init_state(init_state=init_state, id=env_idx)
def reset(
self,
env_idx: Optional[int | list[int] | np.ndarray] = None,
reset_state_ids=None,
options: Optional[dict] = None,
):
if env_idx is None:
env_idx = np.arange(self.num_envs)
if reset_state_ids is None:
num_reset_states = len(env_idx)
reset_state_ids = self._get_random_reset_state_ids(num_reset_states)
self._reconfigure(reset_state_ids, env_idx)
        # step with zero actions for a few frames to let the scene settle
        for _ in range(10):
zero_actions = np.zeros((self.num_envs, 7))
raw_obs, _reward, terminations, info_lists = self.env.step(zero_actions)
obs = self._wrap_obs(raw_obs)
if env_idx is not None:
self._reset_metrics(env_idx)
else:
self._reset_metrics()
infos = {}
return obs, infos
def step(self, actions=None):
if actions is None:
obs, infos = self.reset(reset_state_ids=self.reset_state_ids)
terminations = np.zeros(self.num_envs, dtype=bool)
truncations = np.zeros(self.num_envs, dtype=bool)
return obs, None, to_tensor(terminations), to_tensor(truncations), infos
if isinstance(actions, torch.Tensor):
actions = actions.detach().cpu().numpy()
self._elapsed_steps += 1
raw_obs, _reward, terminations, info_lists = self.env.step(actions)
infos = list_of_dict_to_dict_of_list(info_lists)
truncations = self.elapsed_steps >= self.cfg.max_episode_steps
obs = self._wrap_obs(raw_obs)
step_reward = self._calc_step_reward(terminations)
if self.video_cfg.save_video:
plot_infos = {
"rewards": step_reward,
"terminations": terminations,
"task": self.task_descriptions,
}
self.add_new_frames(raw_obs, plot_infos)
infos = self._record_metrics(step_reward, terminations, infos)
return (
obs,
to_tensor(step_reward),
to_tensor(terminations),
to_tensor(truncations),
infos,
)
def chunk_step(self, chunk_actions):
# chunk_actions: [num_envs, chunk_step, action_dim]
chunk_size = chunk_actions.shape[1]
chunk_rewards = []
raw_chunk_terminations = []
raw_chunk_truncations = []
for i in range(chunk_size):
actions = chunk_actions[:, i]
extracted_obs, step_reward, terminations, truncations, infos = self.step(actions)
chunk_rewards.append(step_reward)
raw_chunk_terminations.append(terminations)
raw_chunk_truncations.append(truncations)
chunk_rewards = torch.stack(chunk_rewards, dim=1) # [num_envs, chunk_steps]
raw_chunk_terminations = torch.stack(raw_chunk_terminations, dim=1) # [num_envs, chunk_steps]
raw_chunk_truncations = torch.stack(raw_chunk_truncations, dim=1) # [num_envs, chunk_steps]
chunk_terminations = raw_chunk_terminations.clone()
chunk_truncations = raw_chunk_truncations.clone()
return (
extracted_obs,
chunk_rewards,
chunk_terminations,
chunk_truncations,
infos,
)
def _calc_step_reward(self, terminations):
reward = self.cfg.reward_coef * terminations
reward_diff = reward - self.prev_step_reward
self.prev_step_reward = reward
if self.use_rel_reward:
return reward_diff
else:
return reward
def add_new_frames(self, raw_obs, plot_infos):
images = []
for env_id, raw_single_obs in enumerate(raw_obs):
info_item = {k: v if np.size(v) == 1 else v[env_id] for k, v in plot_infos.items()}
img = raw_single_obs["agentview_image"][::-1, ::-1]
img = put_info_on_image(img, info_item)
images.append(img)
full_image = tile_images(images, nrows=int(np.sqrt(self.num_envs)))
self.render_images.append(full_image)
def flush_video(self, video_sub_dir: Optional[str] = None):
output_dir = os.path.join(self.video_cfg.video_base_dir, f"rank_{self.rank}")
if video_sub_dir is not None:
output_dir = os.path.join(output_dir, f"{video_sub_dir}")
save_rollout_video(
self.render_images,
output_dir=output_dir,
video_name=f"{self.video_cnt}",
)
self.video_cnt += 1
self.render_images = []
def reset_envs_to_state_ids(self, state_ids_list, task_ids_list):
"""Reset environments to specified state IDs.
Args:
state_ids_list: List of state IDs to reset environments to
"""
env_idx = np.arange(len(state_ids_list))
obs, infos = self.reset(env_idx=env_idx, reset_state_ids=state_ids_list)
return obs, infos
def load_state(self, state_buffer: bytes):
self.env.load_state(state_buffer)
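# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): how a flat reset_state_id
# maps to a (task_id, trial_id) pair in
# _get_task_and_trial_ids_from_reset_state_ids. The per-task trial counts
# below are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    trial_id_bins = [3, 5, 2]  # trials per task (illustrative)
    cumsum_trial_id_bins = np.cumsum(trial_id_bins)  # [3, 8, 10]
    def _locate(reset_state_id):
        start_pivot = 0
        for task_id, end_pivot in enumerate(cumsum_trial_id_bins):
            if start_pivot <= reset_state_id < end_pivot:
                return task_id, int(reset_state_id - start_pivot)
            start_pivot = end_pivot
    print([_locate(i) for i in (0, 3, 9)])  # [(0, 0), (1, 0), (2, 1)]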
|
verl__experimental__vla__envs__libero_env__libero_env.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for evaluating policies in LIBERO simulation environments."""
import math
import numpy as np
def get_libero_image(obs: dict[str, np.ndarray]) -> np.ndarray:
"""
Extracts image from observations and preprocesses it.
Args:
obs: Observation dictionary from LIBERO environment
Returns:
Preprocessed image as numpy array
"""
img = obs["agentview_image"]
img = img[::-1, ::-1] # IMPORTANT: rotate 180 degrees to match train preprocessing
return img
def get_libero_wrist_image(obs: dict[str, np.ndarray]) -> np.ndarray:
"""
Extracts wrist camera image from observations and preprocesses it.
Args:
obs: Observation dictionary from LIBERO environment
Returns:
Preprocessed wrist camera image as numpy array
"""
img = obs["robot0_eye_in_hand_image"]
img = img[::-1, ::-1] # IMPORTANT: rotate 180 degrees to match train preprocessing
return img
def quat2axisangle(quat: np.ndarray) -> np.ndarray:
"""
Copied from robosuite: https://github.com/ARISE-Initiative/robosuite/blob/eafb81f54ffc104f905ee48a16bb15f059176ad3/robosuite/utils/transform_utils.py#L490C1-L512C55
Converts quaternion to axis-angle format.
Returns a unit vector direction scaled by its angle in radians.
Args:
quat (np.array): (x,y,z,w) vec4 float angles
Returns:
np.array: (ax,ay,az) axis-angle exponential coordinates
"""
# clip quaternion
if quat[3] > 1.0:
quat[3] = 1.0
elif quat[3] < -1.0:
quat[3] = -1.0
den = np.sqrt(1.0 - quat[3] * quat[3])
if math.isclose(den, 0.0):
# This is (close to) a zero degree rotation, immediately return
return np.zeros(3)
return (quat[:3] * 2.0 * math.acos(quat[3])) / den
def normalize_gripper_action(action: np.ndarray, binarize: bool = True) -> np.ndarray:
"""
Normalize gripper action from [0,1] to [-1,+1] range.
This is necessary for some environments because the dataset wrapper
standardizes gripper actions to [0,1]. Note that unlike the other action
dimensions, the gripper action is not normalized to [-1,+1] by default.
Normalization formula: y = 2 * (x - orig_low) / (orig_high - orig_low) - 1
Args:
action: Action array with gripper action in the last dimension
binarize: Whether to binarize gripper action to -1 or +1
Returns:
np.ndarray: Action array with normalized gripper action
"""
# Create a copy to avoid modifying the original
normalized_action = action.copy()
# Normalize the last action dimension to [-1,+1]
orig_low, orig_high = 0.0, 1.0
normalized_action[..., -1] = 2 * (normalized_action[..., -1] - orig_low) / (orig_high - orig_low) - 1
if binarize:
# Binarize to -1 or +1
normalized_action[..., -1] = np.sign(normalized_action[..., -1])
return normalized_action
def invert_gripper_action(action: np.ndarray) -> np.ndarray:
"""
Flip the sign of the gripper action (last dimension of action vector).
This is necessary for environments where -1 = open, +1 = close, since
the RLDS dataloader aligns gripper actions such that 0 = close, 1 = open.
Args:
action: Action array with gripper action in the last dimension
Returns:
np.ndarray: Action array with inverted gripper action
"""
# Create a copy to avoid modifying the original
inverted_action = action.copy()
# Invert the gripper action
inverted_action[..., -1] = inverted_action[..., -1] * -1.0
return inverted_action
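# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): chaining the gripper
# post-processing helpers the way prepare_actions_simplevla does, plus the
# quaternion -> axis-angle conversion for a 90-degree rotation about z.
# The action values are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    raw_action = np.array([0.1, -0.2, 0.0, 0.0, 0.0, 0.0, 0.8])  # gripper in [0, 1]
    action = invert_gripper_action(normalize_gripper_action(raw_action, binarize=True))
    print(action[-1])  # 0.8 -> +1 after normalization/binarization -> -1 after inversion
    quat_90_z = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])  # (x, y, z, w)
    print(quat2axisangle(quat_90_z))  # approximately [0, 0, pi/2]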
|
verl__experimental__vla__envs__libero_env__utils.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pipe, connection
from multiprocessing.context import Process
from typing import Any, Callable, Optional
import gymnasium as gym
import numpy as np
from libero.libero.envs import OffScreenRenderEnv
from libero.libero.envs.venv import (
BaseVectorEnv,
CloudpickleWrapper,
EnvWorker,
ShArray,
SubprocEnvWorker,
SubprocVectorEnv,
_setup_buf,
)
def _worker(
parent: connection.Connection,
p: connection.Connection,
env_fn_wrapper: CloudpickleWrapper,
obs_bufs: Optional[dict | tuple | ShArray] = None,
) -> None:
def _encode_obs(obs: dict | tuple | np.ndarray, buffer: dict | tuple | ShArray) -> None:
if isinstance(obs, np.ndarray) and isinstance(buffer, ShArray):
buffer.save(obs)
elif isinstance(obs, tuple) and isinstance(buffer, tuple):
for o, b in zip(obs, buffer, strict=False):
_encode_obs(o, b)
elif isinstance(obs, dict) and isinstance(buffer, dict):
for k in obs.keys():
_encode_obs(obs[k], buffer[k])
return None
parent.close()
env = env_fn_wrapper.data()
try:
while True:
try:
cmd, data = p.recv()
except EOFError: # the pipe has been closed
p.close()
break
if cmd == "step":
env_return = env.step(data)
if obs_bufs is not None:
_encode_obs(env_return[0], obs_bufs)
env_return = (None, *env_return[1:])
p.send(env_return)
elif cmd == "reset":
retval = env.reset(**data)
reset_returns_info = (
isinstance(retval, (tuple | list)) and len(retval) == 2 and isinstance(retval[1], dict)
)
if reset_returns_info:
obs, info = retval
else:
obs = retval
if obs_bufs is not None:
_encode_obs(obs, obs_bufs)
obs = None
if reset_returns_info:
p.send((obs, info))
else:
p.send(obs)
elif cmd == "close":
p.send(env.close())
p.close()
break
elif cmd == "render":
p.send(env.render(**data) if hasattr(env, "render") else None)
elif cmd == "seed":
if hasattr(env, "seed"):
p.send(env.seed(data))
else:
env.reset(seed=data)
p.send(None)
elif cmd == "getattr":
p.send(getattr(env, data) if hasattr(env, data) else None)
elif cmd == "setattr":
setattr(env.unwrapped, data["key"], data["value"])
elif cmd == "check_success":
p.send(env.check_success())
elif cmd == "get_segmentation_of_interest":
p.send(env.get_segmentation_of_interest(data))
elif cmd == "get_sim_state":
p.send(env.get_sim_state())
elif cmd == "set_init_state":
obs = env.set_init_state(data)
p.send(obs)
elif cmd == "reconfigure":
env.close()
seed = data.pop("seed")
env = OffScreenRenderEnv(**data)
env.seed(seed)
p.send(None)
else:
p.close()
raise NotImplementedError
except KeyboardInterrupt:
p.close()
class ReconfigureSubprocEnvWorker(SubprocEnvWorker):
def __init__(self, env_fn: Callable[[], gym.Env], share_memory: bool = False):
self.parent_remote, self.child_remote = Pipe()
self.share_memory = share_memory
self.buffer: Optional[dict | tuple | ShArray] = None
if self.share_memory:
dummy = env_fn()
obs_space = dummy.observation_space
dummy.close()
del dummy
self.buffer = _setup_buf(obs_space)
args = (
self.parent_remote,
self.child_remote,
CloudpickleWrapper(env_fn),
self.buffer,
)
self.process = Process(target=_worker, args=args, daemon=True)
self.process.start()
self.child_remote.close()
EnvWorker.__init__(self, env_fn)
def reconfigure_env_fn(self, env_fn_param):
self.parent_remote.send(["reconfigure", env_fn_param])
return self.parent_remote.recv()
class ReconfigureSubprocEnv(SubprocVectorEnv):
def __init__(self, env_fns: list[Callable[[], gym.Env]], **kwargs: Any) -> None:
def worker_fn(fn: Callable[[], gym.Env]) -> ReconfigureSubprocEnvWorker:
return ReconfigureSubprocEnvWorker(fn, share_memory=False)
BaseVectorEnv.__init__(self, env_fns, worker_fn, **kwargs)
def reconfigure_env_fns(self, env_fns, id=None):
self._assert_is_not_closed()
id = self._wrap_id(id)
if self.is_async:
self._assert_id(id)
for j, i in enumerate(id):
self.workers[i].reconfigure_env_fn(env_fns[j])
|
verl__experimental__vla__envs__libero_env__venv.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main entry point to run the PPO algorithm
"""
import asyncio
import contextlib
import logging
import os
import torch
import torch.distributed
from packaging import version
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import FSDPModule
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._unshard_param_utils import _get_module_fsdp_state, _unshard_params_for_summon
from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType
from verl import DataProto
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import get_device_id, get_device_name, get_torch_device, set_expandable_segments
from verl.utils.flops_counter import FlopsCounter
from verl.utils.fsdp_utils import fsdp_version, set_reshard_after_forward
from verl.utils.import_utils import import_external_libs
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.profiler import DistProfiler, log_gpu_memory_usage, simple_timer
from verl.utils.profiler.performance import reduce_timing, topk_reduce_ratio_min_max
from verl.workers.config import HFModelConfig
from verl.workers.fsdp_workers import ActorRolloutRefWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
device_name = get_device_name()
class RobActorRolloutRefWorker(ActorRolloutRefWorker):
"""
    This worker can be instantiated as a standalone actor, a standalone rollout, a standalone
    reference policy, or a hybrid engine, depending on config.rollout.
    """
"""
fsdp_unshard_exit_stack = contextlib.ExitStack()
def _build_rollout(self, trust_remote_code=False):
self.base_sync_done = False
world_size = torch.distributed.get_world_size()
dp = world_size
infer_tp = self.config.rollout.tensor_model_parallel_size
rollout_device_mesh = init_device_mesh(
device_name, mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"]
)
        # init trainer and rollout random states
self.torch_random_states = get_torch_device().get_rng_state()
gen_dp_rank = rollout_device_mesh["dp"].get_local_rank()
get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states
self.gen_random_states = get_torch_device().get_rng_state()
get_torch_device().set_rng_state(self.torch_random_states)
fsdp_ver = fsdp_version(self.actor_module_fsdp)
if torch.distributed.get_world_size() == 1 and fsdp_ver == 1:
FSDP.set_state_dict_type(
self.actor_module_fsdp,
state_dict_type=StateDictType.FULL_STATE_DICT,
state_dict_config=FullStateDictConfig(),
)
elif fsdp_ver == 1:
FSDP.set_state_dict_type(
self.actor_module_fsdp,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig(),
)
elif fsdp_ver == 2:
# FSDP2 already handles state dict logic via torch.distributed.checkpoint APIs.
pass
else:
raise NotImplementedError(f"Unsupported fsdp version {fsdp_ver}")
self._register_dispatch_collect_info("rollout", dp_rank=self.rank, is_collect=True)
if self.config.get("algorithm", "grpo") == "sac":
from verl.experimental.vla.sac.naive_rollout_pi05 import PI0RolloutRob
self.rollout = PI0RolloutRob(
module=self.actor_module_fsdp, model_config=self.config.model, tokenizer=self.tokenizer
)
else:
from verl.experimental.vla.naive_rollout_rob import NaiveRolloutRob
self.rollout = NaiveRolloutRob(module=self.actor_module_fsdp, model_config=self.config.model)
model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig)
self.model_config = model_config
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def switch_to_rollout(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.rollout_mode())
log_gpu_memory_usage("After switch to rollout mode", logger=logger)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def switch_to_train(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.trainer_mode())
log_gpu_memory_usage("After switch to trainer mode", logger=logger)
async def rollout_mode(self):
"""Context switch hybridengine to rollout mode."""
self.actor_module_fsdp.eval()
aggressive_empty_cache(force_sync=True)
self.base_sync_done = True
# important: need to manually set the random states of each tp to be identical.
self.torch_random_states = get_torch_device().get_rng_state()
get_torch_device().set_rng_state(self.gen_random_states)
if fsdp_version(self.actor_module_fsdp) == 1:
fsdp_unshard_exit_stack = contextlib.ExitStack()
optional_state = _get_module_fsdp_state(self.actor_module_fsdp)
            if optional_state is None:
                # No FSDP state is attached to this module, so there is nothing to unshard.
                states_and_modules = ([], [])
            else:
                states_and_modules = ([optional_state], [self.actor_module_fsdp])
for state, fsdp_module in zip(*states_and_modules, strict=False):
fsdp_unshard_exit_stack.enter_context(
_unshard_params_for_summon(
module=fsdp_module,
state=state,
writeback=False,
rank0_only=False,
offload_to_cpu=False,
with_grads=False,
)
)
self.fsdp_unshard_exit_stack = fsdp_unshard_exit_stack
elif fsdp_version(self.actor_module_fsdp) == 2:
self.actor_module_fsdp.unshard()
for m in self.actor_module_fsdp.modules():
if isinstance(m, FSDPModule) or hasattr(m, "unshard"):
m.unshard()
if version.parse(torch.__version__) < version.parse("2.8"):
set_reshard_after_forward(self.actor_module_fsdp, False)
else:
self.actor_module_fsdp.set_reshard_after_forward(False)
else:
raise NotImplementedError(f"Unsupported fsdp version {fsdp_version(self.actor_module_fsdp)}")
logger.info("rollout mode")
async def trainer_mode(self):
"""Context switch hybridengine to trainer mode."""
self.actor_module_fsdp.train()
# add empty cache after each compute
aggressive_empty_cache(force_sync=True)
set_expandable_segments(True)
# restore random states
self.gen_random_states = get_torch_device().get_rng_state()
get_torch_device().set_rng_state(self.torch_random_states)
if fsdp_version(self.actor_module_fsdp) == 1:
if self.fsdp_unshard_exit_stack is not None:
self.fsdp_unshard_exit_stack.close()
self.fsdp_unshard_exit_stack = None
elif fsdp_version(self.actor_module_fsdp) == 2:
self.actor_module_fsdp.reshard()
for m in self.actor_module_fsdp.modules():
if isinstance(m, FSDPModule) or hasattr(m, "reshard"):
m.reshard()
if version.parse(torch.__version__) < version.parse("2.8"):
set_reshard_after_forward(self.actor_module_fsdp, True)
else:
self.actor_module_fsdp.set_reshard_after_forward(True)
else:
raise NotImplementedError(f"Unsupported fsdp version {fsdp_version(self.actor_module_fsdp)}")
logger.info("trainer mode")
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"), blocking=False)
@DistProfiler.annotate(color="red", role="rollout_generate")
def generate_sequences(self, prompts: DataProto):
        # Support all hardware backends
assert self._is_rollout
prompts = prompts.to(get_device_id())
meta_info = {
"eos_token_id": self.model_config.generation_config.eos_token_id
if self.model_config.generation_config is not None
else self.model_config.tokenizer.eos_token_id,
"pad_token_id": self.model_config.generation_config.pad_token_id
if self.model_config.generation_config is not None
else self.model_config.tokenizer.pad_token_id,
}
prompts.meta_info.update(meta_info)
timing_generate = {}
with simple_timer("generate_sequences", timing_generate):
output = self.rollout.generate_sequences(prompts=prompts)
timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max(
timing_generate["generate_sequences"]
)
timing_generate = reduce_timing(timing_generate)
timing_generate.update(
{
"generation_timing/max": timing_generate_max,
"generation_timing/min": timing_generate_min,
"generation_timing/topk_ratio": timing_generate_topk_ratio,
}
)
output.meta_info["metrics"] = timing_generate
output = output.to("cpu")
# clear kv cache
get_torch_device().empty_cache()
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
        # Import external_lib into the HuggingFace ecosystem
import_external_libs(self.config.model.get("external_lib", None))
from omegaconf import OmegaConf
override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create()))
from verl.experimental.vla.models import register_vla_models
register_vla_models()
from transformers import AutoProcessor
self.processor = AutoProcessor.from_pretrained(self.config.model.path, trust_remote_code=True)
if self._is_actor or self._is_rollout:
# we need the model for actor and rollout
if self._is_actor:
optim_config = self.config.actor.optim
fsdp_config = self.config.actor.fsdp_config
else:
optim_config = None
fsdp_config = OmegaConf.create()
self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config = (
self._build_model_optimizer(
model_path=self.config.model.path,
fsdp_config=fsdp_config,
optim_config=optim_config,
override_model_config=override_model_config,
enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False),
trust_remote_code=self.config.model.get("trust_remote_code", False),
)
)
if fsdp_version(self.actor_module_fsdp) == 1:
# get the original unwrapped module
self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module
if self._is_actor:
OmegaConf.set_struct(self.config.actor, True)
if self.config.get("algorithm") == "sac":
from verl.experimental.vla.sac.sac_actor import RobDataParallelSACActor
self.actor = RobDataParallelSACActor(
config=self.config.actor,
actor_module=self.actor_module_fsdp,
actor_optimizer=self.actor_optimizer,
tokenizer=self.tokenizer,
)
else:
from verl.experimental.vla.dp_rob import RobDataParallelPPOActor
self.actor = RobDataParallelPPOActor(
config=self.config.actor, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer
)
if self._is_rollout:
self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False))
if self._is_actor:
self.flops_counter = FlopsCounter(self.actor_model_config)
self.checkpoint_manager = FSDPCheckpointManager(
model=self.actor_module_fsdp,
optimizer=self.actor.actor_optimizer,
lr_scheduler=self.actor_lr_scheduler,
processing_class=self.processor if self.processor is not None else self.tokenizer,
checkpoint_config=self.config.actor.checkpoint,
trust_remote_code=self.config.model.trust_remote_code,
)
torch.distributed.barrier()
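# A minimal sketch of the RNG bookkeeping used by rollout_mode()/trainer_mode()
# above, assuming plain CPU torch; the helper name and the seed value are
# illustrative. Generation draws from a stream seeded identically across
# tensor-parallel ranks of a data-parallel group, while training keeps its own
# stream; the two states are swapped on every mode switch.
def _rng_swap_sketch():
    trainer_state = torch.get_rng_state()  # snapshot the trainer stream
    torch.manual_seed(1000)                # shared generation seed
    gen_state = torch.get_rng_state()      # snapshot the generation stream
    torch.set_rng_state(trainer_state)     # keep training on its own stream

    # enter "rollout mode": swap in the generation stream
    trainer_state = torch.get_rng_state()
    torch.set_rng_state(gen_state)
    sample = torch.randn(2)

    # back to "trainer mode": remember the generation stream, restore the trainer stream
    gen_state = torch.get_rng_state()
    torch.set_rng_state(trainer_state)
    return sample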
|
verl__experimental__vla__fsdp_workers.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datasets
import hydra
import ray
import torch
from omegaconf import OmegaConf
from verl import DataProto
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role
from verl.utils.device import is_cuda_available
from .rob_ray_trainer import RobRayPPOTrainer
logger = logging.getLogger(__name__)
def calculate_reward(data: DataProto, return_dict: bool = False) -> torch.Tensor:
complete_tensor = data.batch["complete"]
batch_size, num_steps = complete_tensor.shape[:2]
traj_has_complete = torch.any(complete_tensor, dim=(1, 2)) # shape: [batch_size]
reward_per_traj = traj_has_complete.float()
reward_per_step = reward_per_traj.unsqueeze(1).expand(batch_size, num_steps)
if return_dict:
return {"reward_tensor": reward_per_step}
else:
return reward_per_step
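# A minimal sketch of the reward shaping above, assuming the (batch, steps, flags)
# layout implied by the function body; plain tensors stand in for a DataProto and
# the helper name is illustrative. A trajectory earns 1.0 on every step if any of
# its `complete` flags is set, otherwise 0.0.
def _reward_broadcast_sketch():
    complete = torch.zeros(2, 3, 1, dtype=torch.bool)  # batch=2, steps=3, flags=1
    complete[0, 2, 0] = True  # first trajectory finishes on its last step
    # equivalent to the torch.any(complete_tensor, dim=(1, 2)) reduction above
    traj_has_complete = complete.flatten(start_dim=1).any(dim=1)  # -> [True, False]
    reward_per_step = traj_has_complete.float().unsqueeze(1).expand(2, 3)
    return reward_per_step  # [[1., 1., 1.], [0., 0., 0.]]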
@hydra.main(config_path="config", config_name="rob_ppo_trainer", version_base=None)
def main(config):
if not ray.is_initialized():
default_runtime_env = get_ppo_ray_runtime_env()
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
logger.info(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
# Apply controller nsight profiling if configured
if (
is_cuda_available
and config.global_profiler.tool == "nsys"
and config.global_profiler.get("steps") is not None
and len(config.global_profiler.get("steps", [])) > 0
):
from verl.utils.import_utils import is_nvtx_available
        assert is_nvtx_available(), "nvtx is not available on the CUDA platform. Please run 'pip3 install nvtx'."
nsight_options = OmegaConf.to_container(
config.global_profiler.global_tool_config.nsys.controller_nsight_options
)
main_task_with_options = main_task.options(runtime_env={"nsight": nsight_options})
ray.get(main_task_with_options.remote(config))
else:
ray.get(main_task.remote(config))
# [Optional] get the path of the timeline trace file from the configuration, default to None
# This file is used for performance analysis
timeline_json_file = config.ray_kwargs.get("timeline_json_file", None)
if timeline_json_file:
ray.timeline(filename=timeline_json_file)
@ray.remote
def main_task(config):
# print initial config
from pprint import pprint
from omegaconf import OmegaConf
from verl.utils.fs import copy_local_path_from_hdfs
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
# download the checkpoint from hdfs
local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path)
# instantiate tokenizer
from verl.utils import hf_tokenizer
tokenizer = hf_tokenizer(local_path)
# define worker classes
if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.experimental.vla.workers.env.env_worker import EnvWorker
from verl.single_controller.ray import RayWorkerGroup
from .fsdp_workers import RobActorRolloutRefWorker
ray_worker_group_cls = RayWorkerGroup
else:
raise NotImplementedError
role_worker_mapping = {
# Role.Critic: ray.remote(RobActorRolloutRefWorker),
Role.ActorRollout: ray.remote(RobActorRolloutRefWorker),
# Role.RefPolicy: ray.remote(RobActorRolloutRefWorker),
Role.Env: ray.remote(EnvWorker),
}
train_rollout_pool_id = "train_rollout_pool"
num_nodes_actor_rollout = config.trainer.nnodes
train_rollout_gpu_num = config.trainer.n_rollout_gpus_per_node
env_gpu_num = config.trainer.n_env_gpus_per_node
if config.env.disagg_sim.enable:
# disaggregated sim and actor rollout
num_nodes_sim = config.env.disagg_sim.nnodes
else:
# colocated sim and actor rollout
num_nodes_sim = config.trainer.nnodes
resource_pool_spec = {
train_rollout_pool_id: [train_rollout_gpu_num] * num_nodes_actor_rollout,
"env_gpu_pool": [env_gpu_num] * num_nodes_sim,
}
mapping = {
Role.ActorRollout: train_rollout_pool_id,
# Role.Critic: global_pool_id,
# Role.RefPolicy: global_pool_id,
Role.Env: "env_gpu_pool",
}
reward_fn = calculate_reward
val_reward_fn = calculate_reward
resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
# Create training and validation datasets.
train_dataset = datasets.load_dataset("parquet", data_files=config.data.train_files)["train"]
val_dataset = datasets.load_dataset("parquet", data_files=config.data.val_files)["train"]
trainer = RobRayPPOTrainer(
config=config,
tokenizer=tokenizer,
role_worker_mapping=role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
reward_fn=reward_fn,
val_reward_fn=val_reward_fn,
train_dataset=train_dataset,
val_dataset=val_dataset,
)
trainer.init_workers()
trainer.fit()
if __name__ == "__main__":
main()
|
verl__experimental__vla__main_ppo.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pprint import pprint
import datasets
import hydra
import ray
import torch
from omegaconf import OmegaConf
from verl import DataProto
from verl.experimental.vla.sac.sac_ray_trainer import RobRaySACTrainer
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_local_path_from_hdfs
logger = logging.getLogger(__name__)
def calculate_reward(data: DataProto, return_dict: bool = False) -> torch.Tensor:
complete_tensor = data.batch["complete"]
reward_per_step = complete_tensor.float()
if return_dict:
return {"reward_tensor": reward_per_step}
else:
return reward_per_step
@hydra.main(config_path="config", config_name="rob_sac_trainer", version_base=None)
def main(config):
if not ray.is_initialized():
default_runtime_env = get_ppo_ray_runtime_env()
ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
logger.info(f"ray init kwargs: {ray_init_kwargs}")
ray.init(**OmegaConf.to_container(ray_init_kwargs))
ray.get(main_task.remote(config))
@ray.remote
def main_task(config):
# print initial config
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
# download the checkpoint from hdfs
local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path)
# instantiate tokenizer
tokenizer = hf_tokenizer(local_path)
# define worker classes
if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.experimental.vla.workers.env.env_worker import EnvWorker
from verl.single_controller.ray import RayWorkerGroup
from .fsdp_workers import RobActorRolloutRefWorker
ray_worker_group_cls = RayWorkerGroup
else:
raise NotImplementedError
role_worker_mapping = {
Role.ActorRollout: ray.remote(RobActorRolloutRefWorker),
Role.Env: ray.remote(EnvWorker),
}
# setup resource pool manager
train_rollout_gpu_num = config.trainer.n_rollout_gpus_per_node
train_rollout_nodes_num = config.trainer.nnodes
env_gpu_num = config.trainer.n_env_gpus_per_node
env_nodes_num = config.env.disagg_sim.nnodes if config.env.disagg_sim.enable else config.trainer.nnodes
resource_pool_spec = {
"train_rollout_pool": [train_rollout_gpu_num] * train_rollout_nodes_num,
"env_gpu_pool": [env_gpu_num] * env_nodes_num,
}
mapping = {
Role.ActorRollout: "train_rollout_pool",
Role.Env: "env_gpu_pool",
}
resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
# create datasets
train_dataset = datasets.load_dataset("parquet", data_files=config.data.train_files)["train"]
val_dataset = datasets.load_dataset("parquet", data_files=config.data.val_files)["train"]
# instantiate trainer and start training
trainer = RobRaySACTrainer(
config=config,
tokenizer=tokenizer,
role_worker_mapping=role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
reward_fn=calculate_reward,
val_reward_fn=calculate_reward,
train_dataset=train_dataset,
val_dataset=val_dataset,
)
trainer.init_workers()
trainer.fit()
if __name__ == "__main__":
main()
|
verl__experimental__vla__main_sac.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.init as init
class MLP(nn.Module):
"""
A configurable Multi-Layer Perceptron (MLP) module.
It supports dynamic layer construction, multiple activation functions,
and various weight initialization strategies.
Attributes:
input_dim (int): The number of input features.
hidden_dims (list of int): List containing the number of units in each hidden layer.
output_dim (int): The number of output units.
activation (str): The non-linear activation function to use.
Options: 'relu', 'tanh', 'sigmoid', 'leaky_relu', 'elu', 'selu', 'none'.
init_method (str): The weight initialization strategy.
Options: 'kaiming', 'xavier', 'normal', 'orthogonal'.
"""
def __init__(
self,
input_dim: int,
hidden_dims: list[int],
output_dim: int,
activation: str = "relu",
init_method: str = "kaiming",
):
super().__init__()
self.input_dim = input_dim
self.hidden_dims = hidden_dims
self.output_dim = output_dim
self.activation_name = activation.lower()
self.init_method = init_method.lower()
layers = []
current_dim = input_dim
for h_dim in hidden_dims:
layers.append(nn.Linear(current_dim, h_dim))
act_layer = self._get_activation(self.activation_name)
if act_layer is not None:
layers.append(act_layer)
current_dim = h_dim
layers.append(nn.Linear(current_dim, output_dim))
self.network = nn.Sequential(*layers)
self.apply(self.init_weights)
def _get_activation(self, name):
"""
Factory method to return the activation layer based on string name.
Available options: 'relu', 'tanh', 'sigmoid', 'leaky_relu', 'elu', 'selu'.
"""
activations = {
"relu": nn.ReLU(),
"tanh": nn.Tanh(),
"sigmoid": nn.Sigmoid(),
"leaky_relu": nn.LeakyReLU(0.2),
"elu": nn.ELU(),
"selu": nn.SELU(),
"none": None,
}
return activations.get(name, nn.ReLU())
def init_weights(self, m):
"""
Public method to initialize weights for Linear layers.
Can be used with self.apply(model.init_weights).
Supported methods:
- 'kaiming': Best for ReLU/LeakyReLU. Uses kaiming_normal_.
- 'xavier': Best for Tanh/Sigmoid. Uses xavier_normal_.
- 'normal': Standard normal distribution (std=0.02).
- 'orthogonal': Good for preventing gradient explosion in deep networks.
"""
if isinstance(m, nn.Linear):
if self.init_method == "kaiming":
# Use 'relu' as default nonlinearity for Kaiming
nonlinearity = self.activation_name if self.activation_name in ["relu", "leaky_relu"] else "relu"
init.kaiming_normal_(m.weight, nonlinearity=nonlinearity)
elif self.init_method == "xavier":
init.xavier_normal_(m.weight)
elif self.init_method == "normal":
init.normal_(m.weight, mean=0.0, std=0.02)
elif self.init_method == "orthogonal":
init.orthogonal_(m.weight)
# Initialize bias to zero
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
"""Defines the computation performed at every call."""
return self.network(x)
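# A minimal usage sketch, assuming arbitrary layer sizes chosen only for
# illustration: build an MLP with the documented options and run one forward pass.
if __name__ == "__main__":
    import torch

    mlp = MLP(input_dim=8, hidden_dims=[32, 32], output_dim=4, activation="tanh", init_method="xavier")
    x = torch.randn(2, 8)
    print(mlp(x).shape)  # torch.Size([2, 4])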
|
verl__experimental__vla__models__modules__mlp.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/PRIME-RL/SimpleVLA-RL/blob/main/verl/utils/vla_utils/openvla_oft/
# from https://huggingface.co/Haozhan72/Openvla-oft-SFT-libero10-trajall/blob/main/
"""
configuration_prismatic.py
HuggingFace-style configuration definition for Prismatic VLMs, inheriting from `transformers.PretrainedConfig`.
Default configuration specifies `siglip-224px+7b`.
"""
from typing import Any, Optional
from transformers import PretrainedConfig
from transformers.models.auto import CONFIG_MAPPING
# === Utilities for Mapping Prismatic names to HF names ===
# fmt: off
VISION_BACKBONE_TO_RESOLUTION: dict[str, list[int]] = {
"clip-vit-l": [224], "siglip-vit-so400m": [224], "dinov2-vit-l": [224], "in1k-vit-l": [224],
"clip-vit-l-336px": [336],
"siglip-vit-so400m-384px": [384],
"dinoclip-vit-l-336px": [336, 336],
"dinosiglip-vit-so-224px": [224, 224],
"dinosiglip-vit-so-384px": [384, 384],
}
VISION_BACKBONE_TO_TIMM_ID: dict[str, list[str]] = {
"clip-vit-l": ["vit_large_patch14_clip_224.openai"],
"clip-vit-l-336px": ["vit_large_patch14_clip_336.openai"],
"dinov2-vit-l": ["vit_large_patch14_reg4_dinov2.lvd142m"],
"in1k-vit-l": ["vit_large_patch16_224.augreg_in21k_ft_in1k"],
"siglip-vit-so400m": ["vit_so400m_patch14_siglip_224"],
"siglip-vit-so400m-384px": ["vit_so400m_patch14_siglip_384"],
"dinoclip-vit-l-336px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_large_patch14_clip_336.openai"],
"dinosiglip-vit-so-224px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_224"],
"dinosiglip-vit-so-384px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_384"],
}
TIMM_OVERRIDE_ACT_LAYER: dict[str, list[Optional[str]]] = {
"clip-vit-l": ["quick_gelu"], "clip-vit-l-336px": ["quick_gelu"],
"dinov2-vit-l": [None], "in1k-vit-l": [None],
"siglip-vit-so400m": [None], "siglip-vit-so400m-384px": [None],
"dinoclip-vit-l-336px": [None, "quick_gelu"],
"dinosiglip-vit-so-224px": [None, None], "dinosiglip-vit-so-384px": [None, None]
}
LLM_BACKBONE_TO_HF_PATH = {
"llama2-7b-pure": "meta-llama/Llama-2-7b-hf", "llama2-13b-pure": "meta-llama/Llama-2-13b-hf",
"llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", "llama2-13b-chat": "meta-llama/Llama-2-13b-chat-hf",
"vicuna-v15-7b": "lmsys/vicuna-7b-v1.5", "vicuna-v15-13b": "lmsys/vicuna-13b-v1.5",
"mistral-v0.1-7b-pure": "mistralai/Mistral-7B-v0.1",
"mistral-v0.1-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",
"phi-2-3b": "microsoft/phi-2",
}
LLM_BACKBONE_TO_HF_METACLASS = {
"llama2-7b-pure": "llama", "llama2-13b-pure": "llama", "llama2-7b-chat": "llama", "llama2-13b-chat": "llama",
"vicuna-v15-7b": "llama", "vicuna-v15-13b": "llama",
"mistral-v0.1-7b-pure": "mistral", "mistral-v0.1-7b-instruct": "mistral",
"phi-2-3b": "phi",
}
VALID_VISION_BACKBONES = set(VISION_BACKBONE_TO_RESOLUTION.keys())
VALID_LLM_BACKBONES = set(LLM_BACKBONE_TO_HF_PATH)
# fmt: on
class PrismaticConfig(PretrainedConfig):
model_type: str = "prismatic"
is_composition: bool = False
def __init__(
self,
vision_backbone_id: str = "siglip-vit-so400m",
llm_backbone_id: str = "vicuna-v15-7b",
arch_specifier: str = "no-align+gelu-mlp",
use_fused_vision_backbone: Optional[bool] = None,
image_resize_strategy: str = "letterbox",
text_config: Optional[dict[str, Any]] = None,
llm_max_length: int = 2048,
pad_token_id: int = 32000,
pad_to_multiple_of: int = 64,
output_projector_states: bool = False,
**kwargs: str,
) -> None:
if vision_backbone_id not in VALID_VISION_BACKBONES:
raise ValueError(f"Vision backbone `{vision_backbone_id}` not in {VALID_VISION_BACKBONES = }")
if llm_backbone_id not in VALID_LLM_BACKBONES:
raise ValueError(f"LLM backbone `{llm_backbone_id}` not in {VALID_LLM_BACKBONES = }")
# Set Prismatic Configuration Fields
self.vision_backbone_id = vision_backbone_id
self.llm_backbone_id = llm_backbone_id
self.arch_specifier = arch_specifier
self.output_projector_states = output_projector_states
# [Contract] All vision backbone parameters are lists =>> supports fused backbones with different preprocessing
self.use_fused_vision_backbone = (
use_fused_vision_backbone
if use_fused_vision_backbone is not None
else any(self.vision_backbone_id.startswith(v) for v in ["dinoclip", "dinosiglip"])
)
self.timm_model_ids = VISION_BACKBONE_TO_TIMM_ID[self.vision_backbone_id]
self.timm_override_act_layers = TIMM_OVERRIDE_ACT_LAYER[self.vision_backbone_id]
self.image_sizes = VISION_BACKBONE_TO_RESOLUTION[self.vision_backbone_id]
self.image_resize_strategy = image_resize_strategy
self.hf_llm_id = LLM_BACKBONE_TO_HF_PATH[self.llm_backbone_id]
self.llm_max_length = llm_max_length
self.pad_token_id, self.pad_to_multiple_of = pad_token_id, pad_to_multiple_of
# [IMPORTANT] HF Utilities actually look for a `text_config` field... we need to use that specific naming!
self.text_config = (
CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]](**text_config)
if text_config is not None
else CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]]()
)
# Dispatch **kwargs to super() =>> note that `pad_token_id` collides, so we pass it in here as well...
super().__init__(pad_token_id=pad_token_id, **kwargs)
class OpenVLAConfig(PrismaticConfig):
model_type: str = "openvla"
def __init__(
self,
norm_stats: Optional[dict[str, dict[str, dict[str, dict[str, list[float]]]]]] = None,
n_action_bins: int = 256,
**kwargs: str,
) -> None:
self.norm_stats, self.n_action_bins = norm_stats, n_action_bins
super().__init__(**kwargs)
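# A minimal usage sketch with backbone ids taken from the mapping tables above;
# the printed values follow from those tables and from the HF CONFIG_MAPPING.
if __name__ == "__main__":
    cfg = OpenVLAConfig(vision_backbone_id="dinosiglip-vit-so-224px", llm_backbone_id="llama2-7b-pure")
    print(cfg.use_fused_vision_backbone)  # True, inferred from the "dinosiglip" prefix
    print(cfg.timm_model_ids)             # two TIMM ids, one per fused backbone
    print(cfg.text_config.model_type)     # "llama"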
|
verl__experimental__vla__models__openvla_oft__configuration_prismatic.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/PRIME-RL/SimpleVLA-RL/blob/main/verl/utils/vla_utils/openvla_oft/
"""
Important constants for VLA training and evaluation.
Attempts to automatically identify the correct constants to set based on the Python command used to launch
training or evaluation. If it is unclear, defaults to using the LIBERO simulation benchmark constants.
"""
import sys
from enum import Enum
# Llama 2 token constants
IGNORE_INDEX = -100
ACTION_TOKEN_BEGIN_IDX = 31743
STOP_INDEX = 2 # '</s>'
# Defines supported normalization schemes for action and proprioceptive state.
class NormalizationType(str, Enum):
# fmt: off
NORMAL = "normal" # Normalize to Mean = 0, Stdev = 1
BOUNDS = "bounds" # Normalize to Interval = [-1, 1]
BOUNDS_Q99 = "bounds_q99" # Normalize [quantile_01, ..., quantile_99] --> [-1, ..., 1]
# fmt: on
# Define constants for each robot platform
LIBERO_CONSTANTS = {
"NUM_ACTIONS_CHUNK": 8,
"ACTION_DIM": 7,
"PROPRIO_DIM": 8,
"ACTION_PROPRIO_NORMALIZATION_TYPE": NormalizationType.BOUNDS_Q99,
}
ALOHA_CONSTANTS = {
"NUM_ACTIONS_CHUNK": 25,
"ACTION_DIM": 14,
"PROPRIO_DIM": 14,
"ACTION_PROPRIO_NORMALIZATION_TYPE": NormalizationType.BOUNDS,
}
BRIDGE_CONSTANTS = {
"NUM_ACTIONS_CHUNK": 5,
"ACTION_DIM": 7,
"PROPRIO_DIM": 7,
"ACTION_PROPRIO_NORMALIZATION_TYPE": NormalizationType.BOUNDS_Q99,
}
# Function to detect robot platform from command line arguments
def detect_robot_platform():
cmd_args = " ".join(sys.argv).lower()
if "libero" in cmd_args:
return "LIBERO"
elif "aloha" in cmd_args:
return "ALOHA"
elif "bridge" in cmd_args:
return "BRIDGE"
else:
# Default to LIBERO if unclear
return "LIBERO"
# Determine which robot platform to use
ROBOT_PLATFORM = detect_robot_platform()
# Set the appropriate constants based on the detected platform
if ROBOT_PLATFORM == "LIBERO":
constants = LIBERO_CONSTANTS
elif ROBOT_PLATFORM == "ALOHA":
constants = ALOHA_CONSTANTS
elif ROBOT_PLATFORM == "BRIDGE":
constants = BRIDGE_CONSTANTS
# Assign constants to global variables
NUM_ACTIONS_CHUNK = constants["NUM_ACTIONS_CHUNK"]
ACTION_DIM = constants["ACTION_DIM"]
PROPRIO_DIM = constants["PROPRIO_DIM"]
ACTION_PROPRIO_NORMALIZATION_TYPE = constants["ACTION_PROPRIO_NORMALIZATION_TYPE"]
# Print which robot platform constants are being used (for debugging)
print(f"Using {ROBOT_PLATFORM} constants:")
print(f" NUM_ACTIONS_CHUNK = {NUM_ACTIONS_CHUNK}")
print(f" ACTION_DIM = {ACTION_DIM}")
print(f" PROPRIO_DIM = {PROPRIO_DIM}")
print(f" ACTION_PROPRIO_NORMALIZATION_TYPE = {ACTION_PROPRIO_NORMALIZATION_TYPE}")
print("If needed, manually set the correct constants in `prismatic/vla/constants.py`!")
|
verl__experimental__vla__models__openvla_oft__constants.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/PRIME-RL/SimpleVLA-RL/blob/main/verl/utils/vla_utils/openvla_oft/
# from https://huggingface.co/Haozhan72/Openvla-oft-SFT-libero10-trajall/blob/main/
"""
processing_prismatic.py
HuggingFace-style preprocessor definitions for Prismatic VLMs, inheriting from `ProcessorMixin`. Default configuration
specifies `siglip-224px+7b`.
"""
from typing import Any, ClassVar, Optional
import timm.data
import torch
import torchvision.transforms.functional as TVF
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import PreTrainedTokenizerBase
from transformers.image_processing_utils import BatchFeature, ImageProcessingMixin
from transformers.processing_utils import ProcessorMixin
from transformers.tokenization_utils import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from transformers.utils import TensorType
# === Image Processing ===
def letterbox_pad_transform(image: Image.Image, padding_fill_value: tuple[int, int, int]) -> Image.Image:
"""Given a PIL.Image, pad to square by adding a symmetric border around the height/width."""
(w, h), max_wh = image.size, max(image.size)
horizontal_pad, vertical_pad = int((max_wh - w) / 2), int((max_wh - h) / 2)
padding = (horizontal_pad, vertical_pad, horizontal_pad, vertical_pad)
return TVF.pad(image, padding, fill=padding_fill_value, padding_mode="constant")
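# A minimal usage sketch, assuming an arbitrary 200x100 image and gray fill:
# letterbox padding turns a non-square PIL image into a square one, as used by
# the "letterbox" resize strategy handled below.
def _example_letterbox_pad():
    img = Image.new("RGB", (200, 100))
    padded = letterbox_pad_transform(img, padding_fill_value=(127, 127, 127))
    assert padded.size == (200, 200)
    return padded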
class PrismaticImageProcessor(ImageProcessingMixin):
model_input_names: ClassVar[list[str]] = ["pixel_values"]
def __init__(
self,
use_fused_vision_backbone: bool = False,
image_resize_strategy: str = "letterbox",
input_sizes: Optional[list[tuple[int, int, int]]] = None,
interpolations: Optional[list[str]] = None,
means: Optional[list[tuple[float, float, float]]] = None,
stds: Optional[list[tuple[float, float, float]]] = None,
**kwargs: str,
) -> None:
"""
Initialize a PrismaticImageProcessor as a wrapper around a torchvision transform; this transform will be
created by TIMM, and edited to follow our custom `image_resize_strategy` logic.
@param use_fused_vision_backbone: Boolean indicating single or fused (dual) vision backbone
@param image_resize_strategy: Prismatic image resize strategy in < resize-naive | resize-crop | letterbox >
        @param input_sizes: [TIMM :: `data_cfg`] Input image sizes as list of (channels, width, height) tuples
        @param interpolations: [TIMM :: `data_cfg`] Interpolations as list of strings (default: "bicubic")
        @param means: [TIMM :: `data_cfg`] Normalization means as list of float tuples (two entries if `fused_backbone`)
        @param stds: [TIMM :: `data_cfg`] Normalization stds as list of float tuples (two entries if `fused_backbone`)
"""
self.use_fused_vision_backbone = use_fused_vision_backbone
self.image_resize_strategy = image_resize_strategy
# Handle `None` default values
        input_sizes = [(3, 224, 224)] if input_sizes is None else input_sizes
        interpolations = ["bicubic"] if interpolations is None else interpolations
        means = [(0.5, 0.5, 0.5)] if means is None else means
        stds = [(0.5, 0.5, 0.5)] if stds is None else stds
# TIMM `data_cfg` Parameters
self.input_sizes, self.interpolations, self.means, self.stds = input_sizes, interpolations, means, stds
# Grab torchvision transforms via TIMM =>> need to parse for specific "functional" transform values!
self.tvf_resize_params, self.tvf_crop_params, self.tvf_normalize_params = [], [], []
self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
for idx in range(len(input_sizes)):
transform = timm.data.create_transform(
input_size=self.input_sizes[idx],
interpolation=self.interpolations[idx],
mean=self.means[idx],
std=self.stds[idx],
crop_pct=1.0, # Set to 1.0 to ignore cropping (initial Resize sets `input_size`)
crop_mode="center", # Default crop mode -- no-op when `crop_pct == 1.0`
is_training=False, # No image augmentations when loading the transform!
)
# [Validation] Ensure appropriate transform structure, expected sizes
if not (
isinstance(transform, Compose)
and (len(transform.transforms) == 4)
and isinstance(transform.transforms[0], Resize)
and isinstance(transform.transforms[1], CenterCrop)
and isinstance(transform.transforms[2], ToTensor)
and isinstance(transform.transforms[3], Normalize)
and (transform.transforms[0].size == self.input_sizes[idx][-1])
and (transform.transforms[1].size == self.input_sizes[idx][-2:])
):
raise ValueError(f"Unexpected TIMM image transformation structure/sizes: `{transform}`")
            # HF Image Processors *must* be JSON-serializable; as such, they cannot hold torchvision transforms as attributes.
# => Instead, we're going to parse the transform and call "torchvision.transforms.functional" (`tvf`)
resize_t, crop_t, norm_t = transform.transforms[0], transform.transforms[1], transform.transforms[3]
self.tvf_resize_params.append(
{
"size": resize_t.size,
"interpolation": TVF.pil_modes_mapping[resize_t.interpolation],
"max_size": None,
"antialias": True,
}
)
self.tvf_crop_params.append({"output_size": crop_t.size})
self.tvf_normalize_params.append(
{
"mean": norm_t.mean.float().numpy().tolist(),
"std": norm_t.std.float().numpy().tolist(),
"inplace": False,
}
)
self.tvf_do_letterbox, self.tvf_letterbox_fill = False, None
# Handle Prismatic `image_resize_strategy`
if self.image_resize_strategy == "resize-naive":
self.tvf_resize_params[idx]["size"] = (resize_t.size, resize_t.size)
elif self.image_resize_strategy == "letterbox":
self.tvf_do_letterbox, self.tvf_letterbox_fill = True, tuple([int(x * 255) for x in self.means[idx]])
elif self.image_resize_strategy == "resize-crop":
pass
else:
raise ValueError(f"Image resize strategy `{self.image_resize_strategy}` is not supported!")
# Dispatch **kwargs to super()
super().__init__(**kwargs)
def apply_transform(self, img: Image.Image) -> torch.Tensor:
"""Apply `functional` variant of TIMM's Transform = Compose([Resize -> CenterCrop -> ToTensor -> Normalize])"""
if self.tvf_do_letterbox:
img = letterbox_pad_transform(img, self.tvf_letterbox_fill)
# [Contract] Fused Backbones expect "channel-stacked" inputs; we'll unpack on the model side!
imgs_t = []
for idx in range(len(self.input_sizes)):
img_idx = TVF.resize(img, **self.tvf_resize_params[idx])
img_idx = TVF.center_crop(img_idx, **self.tvf_crop_params[idx])
img_idx_t = TVF.to_tensor(img_idx)
img_idx_t = TVF.normalize(img_idx_t, **self.tvf_normalize_params[idx])
imgs_t.append(img_idx_t)
# [Contract] `imgs_t` is a list of Tensors of shape [3, input_size, input_size]; stack along dim = 0
img_t = torch.vstack(imgs_t)
return img_t
def preprocess(
self,
images: Image.Image | list[Image.Image],
return_tensors: Optional[str | TensorType] = None,
**_: str,
) -> BatchFeature:
"""
Preprocess an image (or batch of images); note that unlike the `transformers :: BaseImageProcessor` we
explicitly only handle PIL.Image.Image instances for simplicity.
@param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
@param return_tensors: BatchFeature default Tensor format (e.g., "pt" for torch); if None, returns np.ndarray
@return: Instance of `transformers :: BatchFeature` with a single key "pixel_values"
"""
if not isinstance(images, list):
images = [images]
# Apply `self.img_transform` to each image (will return list of torch.Tensors); stack into "batched" Tensor
pixel_values = torch.stack([self.apply_transform(img.convert("RGB")) for img in images])
# Return BatchFeature =>> note that for compatibility, constructor expects Dict[str, np.ndarray], so we convert
return BatchFeature(data={"pixel_values": pixel_values.float().numpy()}, tensor_type=return_tensors)
def __call__(self, images: Image.Image | list[Image.Image], **kwargs) -> BatchFeature:
return self.preprocess(images, **kwargs)
# === PrismaticProcessor =>> Wraps both ImageProcessor and Tokenizer ===
# =>> https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/processing_llava.py
class PrismaticProcessor(ProcessorMixin):
attributes: ClassVar[list[str]] = ["image_processor", "tokenizer"]
image_processor_class: str = "AutoImageProcessor"
tokenizer_class: str = "AutoTokenizer"
def __init__(
self,
image_processor: Optional[ImageProcessingMixin] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
) -> None:
super().__init__(image_processor, tokenizer)
def __call__(
self,
text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput],
images: Image.Image | list[Image.Image],
padding: bool | str | PaddingStrategy = False,
truncation: Optional[bool | str | TruncationStrategy] = None,
max_length: Optional[int] = None,
return_tensors: Optional[str | TensorType] = TensorType.PYTORCH,
) -> BatchFeature:
"""
Preprocess a given (batch) of text/images for a Prismatic VLM; forwards text to the underlying LLM's tokenizer,
forwards images to PrismaticImageProcessor.
@param text: The (batch) of text to encode; must be a string or list of strings.
@param images: A (batch of) PIL.Image.Image instance(s) to preprocess.
@param padding: Sequence padding strategy (if multiple specified) in < True = "longest" | "max_length" | False >
@param truncation: Truncation strategy for the output sequences; requires `max_length` to be specified
@param max_length: Maximum length (in tokens) to truncate
@param return_tensors: Type of return tensors (usually "pt" or TensorType.PYTORCH)
@return: BatchFeature with keys for `input_ids`, `attention_mask` and `pixel_values`.
"""
pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
text_inputs = self.tokenizer(
text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
)
# [Validate] Need same number of images and text inputs!
if pixel_values.shape[0] != text_inputs.input_ids.shape[0]:
raise ValueError("Batch is malformed; expected same number of images and text inputs!")
return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
# === Tokenizer Dispatch Utilities =>> check `PreTrainedTokenizerBase` for documentation ===
def batch_decode(
self,
sequences: list[int] | list[list[int]] | torch.Tensor | Any, # `Any` = np.ndarray | tf.Tensor
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs: str,
) -> list[str]:
return self.tokenizer.batch_decode(
sequences=sequences,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def decode(
self,
token_ids: int | list[int] | torch.Tensor | Any, # `Any` = np.ndarray | tf.Tensor
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs: str,
) -> str:
return self.tokenizer.decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
@property
def model_input_names(self) -> list[str]:
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
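# A minimal usage sketch, assuming a single (non-fused) backbone and the
# documented default sizes, means, and stds; the helper name and the dummy
# image size are illustrative.
def _example_image_preprocess():
    processor = PrismaticImageProcessor(
        use_fused_vision_backbone=False,
        image_resize_strategy="resize-naive",
        input_sizes=[(3, 224, 224)],
        interpolations=["bicubic"],
        means=[(0.5, 0.5, 0.5)],
        stds=[(0.5, 0.5, 0.5)],
    )
    img = Image.new("RGB", (320, 240))
    batch = processor.preprocess(img, return_tensors="pt")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)
    return batch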
|
verl__experimental__vla__models__openvla_oft__processing_prismatic.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/PRIME-RL/SimpleVLA-RL/blob/main/verl/utils/vla_utils/openvla_oft/
"""Utils for training/fine-tuning scripts."""
import torch
from .constants import ACTION_DIM, ACTION_TOKEN_BEGIN_IDX, IGNORE_INDEX
def get_current_action_mask(token_ids):
    # Mark positions that are not IGNORE_INDEX
    newline_positions = token_ids != IGNORE_INDEX
    # Cumulative count of non-ignored tokens along the sequence
    cumsum = torch.cumsum(newline_positions, dim=1)
# Create the mask
mask = (1 <= cumsum) & (cumsum <= ACTION_DIM)
# Extract the action part only
action_tokens_only_mask = token_ids > ACTION_TOKEN_BEGIN_IDX
mask = action_tokens_only_mask * mask
return mask
def get_next_actions_mask(token_ids):
    # Mark positions that are not IGNORE_INDEX
    newline_positions = token_ids != IGNORE_INDEX
    # Cumulative count of non-ignored tokens along the sequence
    cumsum = torch.cumsum(newline_positions, dim=1)
# Create the mask
mask = cumsum > ACTION_DIM
# Extract the action part only
action_tokens_only_mask = token_ids > ACTION_TOKEN_BEGIN_IDX
mask = action_tokens_only_mask * mask
return mask
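# A minimal sketch of the two masks above, assuming made-up ids and lengths:
# prompt positions carry IGNORE_INDEX, action tokens have ids above
# ACTION_TOKEN_BEGIN_IDX, and the first ACTION_DIM non-ignored action tokens
# form the current action while the rest belong to future actions.
def _example_action_masks():
    prompt = [IGNORE_INDEX] * 3
    actions = [ACTION_TOKEN_BEGIN_IDX + 1 + i for i in range(10)]
    token_ids = torch.tensor([prompt + actions])
    current = get_current_action_mask(token_ids)  # first ACTION_DIM action tokens (7 under LIBERO)
    future = get_next_actions_mask(token_ids)     # remaining action tokens
    assert (current.sum() + future.sum()).item() == len(actions)
    return current, future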
def compute_token_accuracy(predicted_token_ids, ground_truth_token_ids, mask):
correct_preds = (predicted_token_ids == ground_truth_token_ids) & mask
accuracy = correct_preds.sum().float() / mask.sum().float()
return accuracy
def compute_actions_l1_loss(action_tokenizer, predicted_token_ids, ground_truth_token_ids, mask):
pred_continuous_actions = torch.tensor(
action_tokenizer.decode_token_ids_to_actions(predicted_token_ids[mask].cpu().numpy())
)
true_continuous_actions = torch.tensor(
action_tokenizer.decode_token_ids_to_actions(ground_truth_token_ids[mask].cpu().numpy())
)
l1_loss = torch.nn.functional.l1_loss(pred_continuous_actions, true_continuous_actions)
return l1_loss
|
verl__experimental__vla__models__openvla_oft__train_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
import math
import torch
import torch.nn.functional as F # noqa: N812
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from torch import Tensor, nn
from .paligemma_with_expert import PaliGemmaWithExpertModel
def get_safe_dtype(dtype: torch.dtype, device: str | torch.device) -> torch.dtype:
"""Mps is currently not compatible with float64."""
if isinstance(device, torch.device):
device = device.type
if device == "mps" and dtype == torch.float64:
return torch.float32
else:
return dtype
def create_sinusoidal_pos_embedding(
time: torch.Tensor, dimension: int, min_period: float, max_period: float, device: str | torch.device = "cpu"
) -> Tensor:
"""Computes sine-cosine positional embedding vectors for scalar
positions."""
if dimension % 2 != 0:
raise ValueError(f"dimension ({dimension}) must be divisible by 2")
if time.ndim != 1:
raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
dtype = get_safe_dtype(torch.float64, device)
fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
period = min_period * (max_period / min_period) ** fraction
# Compute the outer product
scaling_factor = 1.0 / period * 2 * math.pi
sin_input = scaling_factor[None, :] * time[:, None]
pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
return pos_emb
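# A minimal usage sketch, assuming an illustrative dimension and the same period
# range as PI0Model below (which uses dimension=proj_width, min_period=4e-3,
# max_period=4.0): two scalar timesteps become 8-dimensional sine/cosine features.
def _example_time_embedding():
    t = torch.tensor([0.0, 0.5])
    emb = create_sinusoidal_pos_embedding(t, dimension=8, min_period=4e-3, max_period=4.0, device="cpu")
    assert emb.shape == (2, 8)
    return emb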
def make_att_2d_masks(pad_masks: torch.Tensor, att_masks: torch.Tensor) -> torch.Tensor:
"""Copied from big_vision.
    Tokens can attend to valid input tokens whose cumulative mask_ar is smaller
    than or equal to theirs. This way `mask_ar` int[B, N] can be used to set up
    several types of attention, for example:
[[1 1 1 1 1 1]]: pure causal attention.
[[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
themselves and the last 3 tokens have a causal attention. The first
entry could also be a 1 without changing behaviour.
[[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
block can attend all previous blocks and all tokens on the same block.
Args:
pad_masks: bool[B, N] indicating valid (true) vs. padding (false) tokens.
att_masks: int[B, N] defining attention type. A `1` at a position
indicates the start of a new causal block.
Returns:
A 2D boolean attention mask of shape (B, N, N).
"""
    if att_masks.ndim != 2:
        raise ValueError(f"att_masks must be 2D, got ndim={att_masks.ndim}")
    if pad_masks.ndim != 2:
        raise ValueError(f"pad_masks must be 2D, got ndim={pad_masks.ndim}")
cumsum = torch.cumsum(att_masks, dim=1)
att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
att_2d_masks = att_2d_masks & pad_2d_masks
return att_2d_masks
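# A minimal sketch reproducing the prefix-LM pattern from the docstring, with an
# illustrative helper name: att_masks [0, 0, 0, 1, 1, 1] lets the first three
# tokens attend to each other freely while the last three are causal; pad_masks
# is all True, so no position is padded out.
def _example_prefix_lm_mask():
    pad_masks = torch.ones(1, 6, dtype=torch.bool)
    att_masks = torch.tensor([[0, 0, 0, 1, 1, 1]])
    mask_2d = make_att_2d_masks(pad_masks, att_masks)  # (1, 6, 6); row i = what token i may attend to
    assert bool(mask_2d[0, 0, 2])      # prefix tokens see the whole prefix
    assert not bool(mask_2d[0, 3, 4])  # later tokens never attend to the future
    return mask_2d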
class PI0Model(ModelMixin, ConfigMixin):
"""pi0: A Vision-Language-Action Flow Model for General Robot Control.
[Paper](https://www.physicalintelligence.company/download/pi0.pdf)
[Jax code](https://github.com/Physical-Intelligence/openpi)
┌──────────────────────────────┐
│ actions │
│ ▲ │
│ ┌┴─────┐ │
│ kv cache │Gemma │ │
│ ┌──────────►│Expert│ │
│ │ │ │ │
│ ┌┴────────┐ │x 10 │ │
│ │ │ └▲──▲──┘ │
│ │PaliGemma│ │ │ │
│ │ │ │ robot state │
│ │ │ noise │
│ └▲──▲─────┘ │
│ │ │ │
│ │ image(s) │
│ language tokens │
└──────────────────────────────┘
"""
@register_to_config
def __init__(
self,
max_state_dim: int = 32,
max_action_dim: int = 32,
proj_width: int = 1024,
n_action_steps: int = 50,
num_steps: int = 10,
use_cache: bool = True,
pi05_enabled: bool = False,
):
super().__init__()
# Store the parameters
self.max_state_dim = max_state_dim
self.max_action_dim = max_action_dim
self.proj_width = proj_width
self.n_action_steps = n_action_steps
self.num_steps = num_steps
self.use_cache = use_cache
self.pi05_enabled = pi05_enabled
self.paligemma_with_expert = PaliGemmaWithExpertModel(
pi05_enabled=pi05_enabled,
)
# Projections are float32
if self.pi05_enabled:
self.time_mlp_in = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
self.time_mlp_out = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
else:
self.state_proj = nn.Linear(self.max_state_dim, self.proj_width, dtype=torch.float32)
self.action_time_mlp_in = nn.Linear(self.proj_width * 2, self.proj_width, dtype=torch.float32)
self.action_time_mlp_out = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
self.action_in_proj = nn.Linear(self.max_action_dim, self.proj_width, dtype=torch.float32)
self.action_out_proj = nn.Linear(self.proj_width, self.max_action_dim, dtype=torch.float32)
def forward(
self,
images: list[torch.Tensor],
img_masks: list[torch.Tensor],
lang_tokens: torch.Tensor,
lang_masks: torch.Tensor,
state: torch.Tensor,
x_t: torch.Tensor,
timestep: torch.Tensor,
) -> Tensor:
"""Full forward pass for one diffusion denoising step.
Args:
images: List of image tensors, each shaped (B, C, H, W) after batching.
img_masks: List of boolean masks corresponding to images, each (B,).
lang_tokens: Language token ids (B, L).
lang_masks: Language attention mask (B, L) with True for valid tokens.
state: State tensor (B, state_dim) if pi05 is disabled else ignored.
x_t: Noisy action tokens (B, n_action_steps, action_dim).
timestep: Diffusion timestep as float tensor (B,).
Returns:
Predicted v_t with shape (B, n_action_steps, action_dim).
"""
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)
pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
position_ids = torch.cumsum(pad_masks, dim=1) - 1
(_, suffix_out), _ = self.paligemma_with_expert.forward(
attention_mask=att_2d_masks,
position_ids=position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, suffix_embs],
use_cache=False,
fill_kv_cache=False,
adarms_cond=[None, adarms_cond],
)
suffix_out = suffix_out[:, -self.n_action_steps :]
# Original openpi code, upcast attention output
suffix_out = suffix_out.to(dtype=self.action_out_proj.weight.dtype)
v_t = self.action_out_proj(suffix_out)
return v_t
def sample_noise(self, shape: tuple[int, ...], device: torch.device | str) -> torch.Tensor:
"""Generate Gaussian noise for the action trajectory.
Args:
shape: Desired output shape, typically (B, n_action_steps, action_dim).
device: Target device string or torch.device.
Returns:
A float32 tensor of standard normal samples with the given shape.
"""
noise = torch.normal(
mean=0.0,
std=1.0,
size=shape,
dtype=torch.float32,
device=device,
)
return noise
def embed_prefix(
self,
images: list[torch.Tensor],
img_masks: list[torch.Tensor],
lang_tokens: torch.Tensor,
lang_masks: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Embed visual and language inputs as the transformer prefix.
Args:
images: List of (B, C, H, W) tensors.
img_masks: List of (B,) boolean masks for image presence.
lang_tokens: (B, L) token ids.
lang_masks: (B, L) boolean mask; True indicates valid tokens.
Returns:
A tuple of (embs, pad_masks, att_masks):
- embs: (B, Np, D) concatenated image and language embeddings
- pad_masks: (B, Np) valid token mask
- att_masks: (B, Np) attention mask scheme selector
"""
# Optimize: batch process images and pre-allocate tensors
num_images = len(images)
# Stack images and masks for batch processing
images_stacked = torch.stack(images, dim=0) # (num_images, bsize, ...)
img_masks_stacked = torch.stack(img_masks, dim=0) # (num_images, bsize)
# Batch embed all images at once
# Reshape to (num_images * bsize, ...)
orig_shape = images_stacked.shape
images_flat = images_stacked.reshape(-1, *orig_shape[2:])
img_embs_flat = self.paligemma_with_expert.embed_image(images_flat)
# Reshape back to (num_images, bsize, num_img_embs, emb_dim)
bsize = orig_shape[1]
img_embs = img_embs_flat.reshape(num_images, bsize, *img_embs_flat.shape[1:])
# Normalize image embeddings
img_emb_dim = img_embs.shape[-1]
num_img_embs = img_embs.shape[2]
# Expand masks: (num_images, bsize) -> (num_images, bsize, num_img_embs)
img_masks_expanded = img_masks_stacked[:, :, None].expand(num_images, bsize, num_img_embs)
# Reshape to (bsize, num_images * num_img_embs, emb_dim)
img_embs_concat = img_embs.transpose(0, 1).reshape(bsize, num_images * num_img_embs, img_emb_dim)
img_masks_concat = img_masks_expanded.transpose(0, 1).reshape(bsize, num_images * num_img_embs)
# Process language embeddings
lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
lang_emb_dim = lang_emb.shape[-1]
lang_emb = lang_emb * math.sqrt(lang_emb_dim)
lang_emb = lang_emb.to(dtype=img_embs_concat.dtype)
num_lang_embs = lang_emb.shape[1]
total_seq_len = num_images * num_img_embs + num_lang_embs
# Pre-allocate final tensors
embs = torch.empty(
bsize, total_seq_len, img_emb_dim, dtype=img_embs_concat.dtype, device=img_embs_concat.device
)
pad_masks = torch.empty(bsize, total_seq_len, dtype=torch.bool, device=img_embs_concat.device)
# Fill pre-allocated tensors
embs[:, : num_images * num_img_embs] = img_embs_concat
embs[:, num_images * num_img_embs :] = lang_emb
pad_masks[:, : num_images * num_img_embs] = img_masks_concat
pad_masks[:, num_images * num_img_embs :] = lang_masks
# Create attention masks (all zeros for full attention between image and language)
att_masks = torch.zeros(total_seq_len, dtype=torch.bool, device=pad_masks.device)
att_masks = att_masks[None, :].expand(bsize, total_seq_len)
return embs, pad_masks, att_masks
def embed_suffix(
self, state: torch.Tensor, noisy_actions: torch.Tensor, timestep: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor | None]:
"""Embed state, action and time tokens as the transformer suffix.
Args:
state: (B, state_dim) robot state; ignored when pi05 is enabled.
noisy_actions: (B, n_action_steps, action_dim) current x_t.
timestep: (B,) diffusion time in [0, 1].
Returns:
(embs, pad_masks, att_masks, adarms_cond) where:
- embs: (B, Ns, D) suffix embeddings
- pad_masks: (B, Ns) valid mask
- att_masks: (B, Ns) causal scheme for suffix
- adarms_cond: (B, D) AdaRMS conditioning or None
"""
embs = []
pad_masks = []
att_masks = []
action_emb = self.action_in_proj(noisy_actions)
bsize = action_emb.shape[0]
dtype = action_emb.dtype
device = action_emb.device
# Embed state
if not self.pi05_enabled:
state_emb = self.state_proj(state)
embs.append(state_emb[:, None, :])
state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
pad_masks.append(state_mask)
# Set attention masks so that image and language inputs do not attend to state or actions
att_masks += [1]
# Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
time_emb = create_sinusoidal_pos_embedding(
timestep, self.proj_width, min_period=4e-3, max_period=4.0, device=device
)
time_emb = time_emb.type(dtype=dtype)
if self.pi05_enabled:
# time MLP (for adaRMS)
time_emb = self.time_mlp_in(time_emb)
time_emb = F.silu(time_emb)
time_emb = self.time_mlp_out(time_emb)
time_emb = F.silu(time_emb)
action_expert_emb = action_emb
adarms_cond = time_emb
else:
# Fuse timestep + action information using an MLP
time_emb = time_emb[:, None, :].expand_as(action_emb)
action_time_emb = torch.cat([action_emb, time_emb], dim=2)
action_time_emb = self.action_time_mlp_in(action_time_emb)
action_time_emb = F.silu(action_time_emb) # swish == silu
action_time_emb = self.action_time_mlp_out(action_time_emb)
action_expert_emb = action_time_emb
adarms_cond = None
# Add to input tokens
embs.append(action_expert_emb)
bsize, action_time_dim = action_expert_emb.shape[:2]
action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
pad_masks.append(action_time_mask)
# Set attention masks so that image, language and state inputs do not attend to action tokens
att_masks += [1] + ([0] * (self.n_action_steps - 1))
embs = torch.cat(embs, dim=1)
pad_masks = torch.cat(pad_masks, dim=1)
att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
att_masks = att_masks[None, :].expand(bsize, len(att_masks))
return embs, pad_masks, att_masks, adarms_cond
@torch.no_grad()
def sample_actions(
self,
images: list[torch.Tensor],
img_masks: list[torch.Tensor],
lang_tokens: torch.Tensor,
lang_masks: torch.Tensor,
state: torch.Tensor,
noise: Tensor | None = None,
) -> Tensor:
"""Run the full inference loop to predict an action trajectory.
Args:
images: List of (B, C, H, W) image tensors.
img_masks: List of (B,) boolean masks.
lang_tokens: (B, L) token ids.
lang_masks: (B, L) boolean mask for tokens.
state: (B, state_dim) robot state.
noise: Optional initial noise; if None, generated internally.
Returns:
Predicted actions with shape (B, n_action_steps, action_dim).
"""
bsize = lang_tokens.shape[0]
device = lang_tokens.device
if noise is None:
actions_shape = (bsize, self.n_action_steps, self.max_action_dim)
noise = self.sample_noise(actions_shape, device)
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
# Compute image and language key value cache
_, past_key_values = self.paligemma_with_expert.forward(
attention_mask=prefix_att_2d_masks,
position_ids=prefix_position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, None],
use_cache=self.use_cache,
fill_kv_cache=True,
adarms_cond=[None, None],
)
x_t = noise
dt = -1.0 / self.num_steps
timesteps = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device)
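        # Euler integration of the learned flow: starting from pure noise at t=1, take
        # num_steps steps of size dt = -1/num_steps so that x_t reaches the predicted
        # action trajectory at t=0.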
for timestep in timesteps:
v_t = self.denoise_step(
state,
prefix_pad_masks,
past_key_values,
x_t,
timestep.expand(bsize),
)
x_t += dt * v_t
return x_t
def denoise_step(
self,
state: torch.Tensor,
prefix_pad_masks: torch.Tensor,
past_key_values: dict,
x_t: torch.Tensor,
timestep: torch.Tensor,
) -> torch.Tensor:
"""Apply one denoising step of the noise x_t at a given timestep.
Args:
state: (B, state_dim) robot state.
prefix_pad_masks: (B, Np) prefix pad masks computed from embed_prefix.
past_key_values: KV cache dict for the prefix (images+language).
x_t: (B, n_action_steps, action_dim) current noisy actions.
timestep: (B,) current time in [0, 1].
Returns:
v_t prediction with shape (B, n_action_steps, action_dim).
"""
suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)
suffix_len = suffix_pad_masks.shape[1]
batch_size = prefix_pad_masks.shape[0]
prefix_len = prefix_pad_masks.shape[1]
prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
outputs_embeds, _ = self.paligemma_with_expert.forward(
attention_mask=full_att_2d_masks,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=[None, suffix_embs],
use_cache=self.use_cache,
fill_kv_cache=False,
adarms_cond=[None, adarms_cond],
)
suffix_out = outputs_embeds[1]
suffix_out = suffix_out[:, -self.n_action_steps :]
suffix_out = suffix_out.to(dtype=self.action_out_proj.weight.dtype)
v_t = self.action_out_proj(suffix_out)
return v_t
|
verl__experimental__vla__models__pi0_torch__model__modeling_pi0.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.auto import CONFIG_MAPPING
from transformers.models.siglip.configuration_siglip import SiglipVisionConfig
from transformers.models.siglip.modeling_siglip import (
SiglipEncoder,
SiglipMultiheadAttentionPoolingHead,
SiglipVisionEmbeddings,
)
from transformers.utils import can_return_tuple
from verl.utils.device import get_device_name
def get_transformers_siglip_vision_config() -> SiglipVisionConfig:
return CONFIG_MAPPING["siglip_vision_model"](
hidden_size=1152,
intermediate_size=4304,
num_channels=3,
num_attention_heads=16,
num_hidden_layers=27,
num_image_tokens=256,
patch_size=14,
projection_dim=2048,
projector_hidden_act="gelu_fast",
torch_dtype="float32",
vision_use_head=False,
)
class GemmaRMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6, use_ada_rms_norm: bool = False):
super().__init__()
self.eps = eps
self.use_ada_rms_norm = use_ada_rms_norm
if use_ada_rms_norm:
self.dense = nn.Linear(dim, dim * 3, bias=True)
nn.init.zeros_(self.dense.weight)
else:
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x, cond: torch.Tensor | None = None):
normed_inputs = self._norm(x.float())
if self.use_ada_rms_norm:
modulation = self.dense(cond)
scale, shift, gate = torch.chunk(modulation.unsqueeze(1), 3, dim=-1)
normed_inputs = normed_inputs.float() * (1.0 + scale.float()) + shift.float()
return normed_inputs.type_as(x), gate.type_as(x)
# Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = normed_inputs * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
if self.use_ada_rms_norm:
return f"{tuple(self.dense.weight.shape)}, eps={self.eps}, use_ada_rms_norm=True"
else:
return f"{tuple(self.weight.shape)}, eps={self.eps}"
class SiglipVisionTransformer(nn.Module):
def __init__(self, config: SiglipVisionConfig):
super().__init__()
self.config = config
self.config._attn_implementation = "sdpa"
embed_dim = config.hidden_size
self.embeddings = SiglipVisionEmbeddings(config)
self.encoder = SiglipEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
if self.use_head:
self.head = SiglipMultiheadAttentionPoolingHead(config)
@can_return_tuple
# @auto_docstring
def forward(
self,
pixel_values,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: Optional[bool] = False,
) -> BaseModelOutputWithPooling:
"""Forward pass of the SigLIP vision encoder.
Args:
pixel_values: Image tensor expected by SigLIP (B, C, H, W).
output_attentions: Whether to return attention maps.
output_hidden_states: Whether to return hidden states.
interpolate_pos_encoding: Enable pos-encoding interpolation for different sizes.
Returns:
BaseModelOutputWithPooling with last_hidden_state and optionally pooled output.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = hidden_states.to(dtype=torch.bfloat16)
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooler_output = self.head(last_hidden_state) if self.use_head else None
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooler_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.paligemma.modeling_paligemma.PaliGemmaMultiModalProjector
class PaliGemmaMultiModalProjector(nn.Module):
def __init__(self, vision_hidden_size: int = 1152, projection_dim: int = 2048):
super().__init__()
self.linear = nn.Linear(vision_hidden_size, projection_dim, bias=True)
def forward(self, image_features: torch.Tensor) -> torch.Tensor:
"""Project vision features to the transformer hidden size."""
hidden_states = self.linear(image_features)
return hidden_states
class RoPEEmbedding(nn.Module):
"""Precomputed RoPE embeddings for improved performance.
This implementation precomputes sin/cos values for a maximum sequence length, avoiding redundant trigonometric
calculations during forward passes.
"""
def __init__(self, dim: int, max_wavelength: int = 10_000, max_seq_len: int = 8192):
super().__init__()
self.dim = dim
self.max_wavelength = max_wavelength
self.max_seq_len = max_seq_len
# Precompute frequency exponents and inverse frequencies
d_half = dim // 2
freq_exponents = (2.0 / dim) * torch.arange(d_half, dtype=torch.float32)
inv_freq = 1.0 / (max_wavelength**freq_exponents)
# Precompute sin and cos for all positions up to max_seq_len
# Shape: [max_seq_len, d_half]
positions = torch.arange(max_seq_len, dtype=torch.float32)
freqs = torch.outer(positions, inv_freq) # [max_seq_len, d_half]
# Precompute sin and cos values
# We expand to [max_seq_len, 1, d_half] for broadcasting in forward
cos_cached = torch.cos(freqs).unsqueeze(1) # [max_seq_len, 1, d_half]
sin_cached = torch.sin(freqs).unsqueeze(1) # [max_seq_len, 1, d_half]
# Register as buffers so they automatically move to the correct device with the model
self.register_buffer("cos_cached", cos_cached, persistent=False)
self.register_buffer("sin_cached", sin_cached, persistent=False)
def forward(self, x: torch.Tensor, positions: torch.LongTensor) -> torch.Tensor:
"""Applies RoPE positions [B, L] to x [B, L, H, D].
Args:
x: Input tensor of shape [B, L, H, D]
positions: Position indices of shape [B, L]
Returns:
Rotated tensor of shape [B, L, H, D]
"""
dtype = x.dtype
x = x.to(torch.float32)
# Index precomputed sin/cos values using positions
# positions: [B, L] -> cos/sin: [B, L, 1, d_half]
cos = self.cos_cached[positions] # [B, L, 1, d_half]
sin = self.sin_cached[positions] # [B, L, 1, d_half]
# Apply rotary embeddings
d_half = self.dim // 2
x1, x2 = x.split(d_half, dim=-1) # Each: [B, L, H, d_half]
# Rotate: out1 = x1 * cos - x2 * sin, out2 = x2 * cos + x1 * sin
res = torch.empty_like(x)
res[..., :d_half] = x1 * cos - x2 * sin
res[..., d_half:] = x2 * cos + x1 * sin
return res.to(dtype)
class GemmaAttentionWithExpert(nn.Module):
def __init__(
self,
layer_idx: int,
# PaliGemma params
paligemma_hidden_size: int = 2048,
paligemma_num_attention_heads: int = 8,
paligemma_num_key_value_heads: int = 1,
paligemma_head_dim: int = 256,
paligemma_attention_bias: bool = False,
# Expert params
expert_hidden_size: int = 1024,
expert_num_attention_heads: int = 8,
expert_num_key_value_heads: int = 1,
expert_head_dim: int = 256,
expert_attention_bias: bool = False,
# RoPE params
rope_max_wavelength: int = 10_000,
rope_max_seq_len: int = 8192,
):
super().__init__()
self.layer_idx = layer_idx
self.q_proj = nn.ModuleList(
[
nn.Linear(
paligemma_hidden_size,
paligemma_num_attention_heads * paligemma_head_dim,
bias=paligemma_attention_bias,
),
nn.Linear(expert_hidden_size, expert_num_attention_heads * expert_head_dim, bias=expert_attention_bias),
]
)
self.k_proj = nn.ModuleList(
[
nn.Linear(
paligemma_hidden_size,
paligemma_num_key_value_heads * paligemma_head_dim,
bias=paligemma_attention_bias,
),
nn.Linear(expert_hidden_size, expert_num_key_value_heads * expert_head_dim, bias=expert_attention_bias),
]
)
self.v_proj = nn.ModuleList(
[
nn.Linear(
paligemma_hidden_size,
paligemma_num_key_value_heads * paligemma_head_dim,
bias=paligemma_attention_bias,
),
nn.Linear(expert_hidden_size, expert_num_key_value_heads * expert_head_dim, bias=expert_attention_bias),
]
)
self.o_proj = nn.ModuleList(
[
nn.Linear(
paligemma_num_attention_heads * paligemma_head_dim,
paligemma_hidden_size,
bias=paligemma_attention_bias,
),
nn.Linear(expert_num_attention_heads * expert_head_dim, expert_hidden_size, bias=expert_attention_bias),
]
)
self.paligemma_num_attention_heads = paligemma_num_attention_heads
self.paligemma_num_key_value_heads = paligemma_num_key_value_heads
self.paligemma_head_dim = paligemma_head_dim
self.expert_num_attention_heads = expert_num_attention_heads
self.expert_num_key_value_heads = expert_num_key_value_heads
self.expert_head_dim = expert_head_dim
assert paligemma_head_dim == expert_head_dim
assert paligemma_num_attention_heads == expert_num_attention_heads
assert paligemma_num_key_value_heads == expert_num_key_value_heads
self.rope_embedding = RoPEEmbedding(
dim=paligemma_head_dim, max_wavelength=rope_max_wavelength, max_seq_len=rope_max_seq_len
)
def forward(
self,
inputs_embeds: list[Optional[torch.Tensor]],
position_ids: torch.LongTensor,
attention_mask: torch.Tensor,
use_cache: bool,
past_key_values: Optional[dict] = None,
fill_kv_cache: bool = False,
) -> list[Optional[torch.Tensor]]:
"""Multi-source attention over PaliGemma and Expert streams.
Args:
inputs_embeds: [paligemma_embeds, expert_embeds]. Each is (B, L, D) or None.
position_ids: (B, L) rotary positions.
attention_mask: (B, L, L) attention mask.
use_cache: Whether to use KV cache.
past_key_values: Optional cache dict per layer.
fill_kv_cache: If True, fill cache; otherwise, append to it.
Returns:
List[Optional[Tensor]]: outputs per stream aligned to inputs order.
"""
query_states = []
key_states = []
value_states = []
if inputs_embeds[0] is not None:
# PaliGemma
hidden_states = inputs_embeds[0]
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.paligemma_head_dim)
query_states.append(self.q_proj[0](hidden_states).view(hidden_shape))
key_states.append(self.k_proj[0](hidden_states).view(hidden_shape))
value_states.append(self.v_proj[0](hidden_states).view(hidden_shape))
if inputs_embeds[1] is not None:
# Expert
hidden_states = inputs_embeds[1]
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.expert_head_dim)
query_states.append(self.q_proj[1](hidden_states).view(hidden_shape))
key_states.append(self.k_proj[1](hidden_states).view(hidden_shape))
value_states.append(self.v_proj[1](hidden_states).view(hidden_shape))
query_states = torch.cat(query_states, dim=1)
key_states = torch.cat(key_states, dim=1)
value_states = torch.cat(value_states, dim=1)
query_states = self.rope_embedding(query_states, position_ids)
key_states = self.rope_embedding(key_states, position_ids)
if use_cache:
if fill_kv_cache:
past_key_values[self.layer_idx] = {
"key_states": key_states,
"value_states": value_states,
}
else:
key_states = torch.cat([past_key_values[self.layer_idx]["key_states"], key_states], dim=1)
value_states = torch.cat([past_key_values[self.layer_idx]["value_states"], value_states], dim=1)
num_att_heads = self.paligemma_num_attention_heads # Assume same for both
num_key_value_heads = self.paligemma_num_key_value_heads
head_dim = self.paligemma_head_dim
batch_size = query_states.shape[0]
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if num_key_value_heads != num_att_heads:
# key_states: (B, num_kv_heads, L, D) -> (B, num_att_heads, L, D)
key_states = torch.repeat_interleave(key_states, num_att_heads // num_key_value_heads, dim=1)
value_states = torch.repeat_interleave(value_states, num_att_heads // num_key_value_heads, dim=1)
att_output = F.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=attention_mask[:, None, :, :],
is_causal=False,
)
att_output = att_output.permute(0, 2, 1, 3)
att_output = att_output.reshape(batch_size, -1, num_att_heads * head_dim)
outputs_embeds = []
start = 0
if inputs_embeds[0] is not None:
hidden_states = inputs_embeds[0]
end = start + hidden_states.shape[1]
if att_output.dtype != self.o_proj[0].weight.dtype:
att_output_i = att_output[:, start:end].to(self.o_proj[0].weight.dtype)
else:
att_output_i = att_output[:, start:end]
out_emb = self.o_proj[0](att_output_i)
outputs_embeds.append(out_emb)
start = end
else:
outputs_embeds.append(None)
if inputs_embeds[1] is not None:
hidden_states = inputs_embeds[1]
end = start + hidden_states.shape[1]
if att_output.dtype != self.o_proj[1].weight.dtype:
att_output_i = att_output[:, start:end].to(self.o_proj[1].weight.dtype)
else:
att_output_i = att_output[:, start:end]
out_emb = self.o_proj[1](att_output_i)
outputs_embeds.append(out_emb)
else:
outputs_embeds.append(None)
return outputs_embeds
class GemmaMLP(nn.Module):
def __init__(self, hidden_size: int = 1024, intermediate_size: int = 4096, hidden_act: str = "gelu_pytorch_tanh"):
super().__init__()
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[hidden_act]
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Gated MLP block used in both streams."""
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class GemmaDecoderLayerWithExpert(nn.Module):
def __init__(
self,
layer_idx: int,
pi05_enabled: bool,
# PaliGemma params
paligemma_hidden_size: int = 2048,
paligemma_num_attention_heads: int = 8,
paligemma_num_key_value_heads: int = 1,
paligemma_head_dim: int = 256,
paligemma_attention_bias: bool = False,
paligemma_intermediate_size: int = 16384,
paligemma_hidden_act: str = "gelu_pytorch_tanh",
paligemma_rms_norm_eps: float = 1e-6,
# Expert params
expert_hidden_size: int = 1024,
expert_num_attention_heads: int = 8,
expert_num_key_value_heads: int = 1,
expert_head_dim: int = 256,
expert_attention_bias: bool = False,
expert_intermediate_size: int = 4096,
expert_hidden_act: str = "gelu_pytorch_tanh",
expert_rms_norm_eps: float = 1e-6,
# RoPE params
rope_max_wavelength: int = 10_000,
rope_max_seq_len: int = 8192,
):
super().__init__()
self.self_attn = GemmaAttentionWithExpert(
layer_idx,
paligemma_hidden_size,
paligemma_num_attention_heads,
paligemma_num_key_value_heads,
paligemma_head_dim,
paligemma_attention_bias,
expert_hidden_size,
expert_num_attention_heads,
expert_num_key_value_heads,
expert_head_dim,
expert_attention_bias,
rope_max_wavelength,
rope_max_seq_len,
)
self.mlps = nn.ModuleList(
[
GemmaMLP(paligemma_hidden_size, paligemma_intermediate_size, paligemma_hidden_act),
GemmaMLP(expert_hidden_size, expert_intermediate_size, expert_hidden_act),
]
)
self.input_layernorms = nn.ModuleList(
[
GemmaRMSNorm(paligemma_hidden_size, eps=paligemma_rms_norm_eps),
GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
]
)
self.post_attention_layernorms = nn.ModuleList(
[
GemmaRMSNorm(paligemma_hidden_size, eps=paligemma_rms_norm_eps),
GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
]
)
self.pi05_enabled = pi05_enabled
def gated_residual(self, x, y, gate):
if x is None or y is None:
return None
if gate is None:
return x + y
return x + y * gate
def forward(
self,
inputs_embeds: list[Optional[torch.Tensor]],
adarms_cond: list[Optional[torch.Tensor]],
position_ids: torch.LongTensor,
attention_mask: torch.Tensor,
use_cache: bool,
past_key_values: Optional[dict] = None,
fill_kv_cache: bool = False,
) -> list[Optional[torch.Tensor]]:
"""Decoder layer with dual-stream attention and optional AdaRMS
modulation.
Args:
inputs_embeds: [paligemma, expert] embeds.
adarms_cond: Optional conditioning vectors for AdaRMS.
position_ids: (B, L) positions for RoPE.
attention_mask: (B, L, L) attention mask.
use_cache: Whether to use KV cache.
past_key_values: Optional cache dict.
fill_kv_cache: Whether to fill or reuse KV cache.
Returns:
List[Optional[Tensor]]: Updated hidden states per stream.
"""
residuals = list(inputs_embeds)
normed_embeds = []
attn_gates = []
for i, hidden_states in enumerate(inputs_embeds):
if hidden_states is not None:
if self.pi05_enabled and adarms_cond[i] is not None:
normed_h, attn_gate = self.input_layernorms[i](hidden_states, adarms_cond[i])
normed_embeds.append(normed_h)
attn_gates.append(attn_gate)
else:
normed_embeds.append(self.input_layernorms[i](hidden_states))
attn_gates.append(None)
else:
normed_embeds.append(None)
attn_gates.append(None)
attn_outputs = self.self_attn(
normed_embeds, position_ids, attention_mask, use_cache, past_key_values, fill_kv_cache
)
after_attn_embeds = []
for i, (residual, attn_output, attn_gate) in enumerate(zip(residuals, attn_outputs, attn_gates, strict=False)):
if residual is not None:
after_attn_embeds.append(self.gated_residual(residual, attn_output, attn_gate))
else:
after_attn_embeds.append(None)
outputs = []
for i, hidden_states in enumerate(after_attn_embeds):
if hidden_states is not None:
residual = hidden_states
if self.pi05_enabled and adarms_cond[i] is not None:
normed_h, mlp_gate = self.post_attention_layernorms[i](hidden_states, adarms_cond[i])
else:
normed_h = self.post_attention_layernorms[i](hidden_states)
mlp_gate = None
mlp_out = self.mlps[i](normed_h)
outputs.append(self.gated_residual(residual, mlp_out, mlp_gate))
else:
outputs.append(None)
return outputs, past_key_values
class PaliGemmaWithExpertModel(nn.Module):
def __init__(
self,
pi05_enabled: bool = False,
# Paligemma params
paligemma_vocab_size: int = 257152,
paligemma_pad_token_id: int = 0,
paligemma_num_hidden_layers: int = 18,
paligemma_hidden_size: int = 2048,
paligemma_num_attention_heads: int = 8,
paligemma_num_key_value_heads: int = 1,
paligemma_attention_bias: bool = False,
paligemma_intermediate_size: int = 16384,
paligemma_hidden_act: str = "gelu_pytorch_tanh",
paligemma_rms_norm_eps: float = 1e-6,
# Expert params
expert_hidden_size: int = 1024,
expert_num_attention_heads: int = 8,
expert_num_key_value_heads: int = 1,
expert_head_dim: int = 256,
expert_attention_bias: bool = False,
expert_intermediate_size: int = 4096,
expert_hidden_act: str = "gelu_pytorch_tanh",
expert_rms_norm_eps: float = 1e-6,
# RoPE params
rope_max_wavelength: int = 10_000,
rope_max_seq_len: int = 8192,
):
super().__init__()
self.pi05_enabled = pi05_enabled
siglip_vision_config = get_transformers_siglip_vision_config()
# Vision and projection
self.vision_tower = SiglipVisionTransformer(siglip_vision_config)
self.multi_modal_projector = PaliGemmaMultiModalProjector(
vision_hidden_size=siglip_vision_config.hidden_size, projection_dim=siglip_vision_config.projection_dim
)
self.paligemma_hidden_size = paligemma_hidden_size
# Language embed
self.embed_tokens = nn.Embedding(paligemma_vocab_size, paligemma_hidden_size, paligemma_pad_token_id)
# Decoder layers
self.layers = nn.ModuleList(
[
GemmaDecoderLayerWithExpert(
layer_idx=i,
pi05_enabled=pi05_enabled,
paligemma_hidden_size=paligemma_hidden_size,
paligemma_num_attention_heads=paligemma_num_attention_heads,
paligemma_num_key_value_heads=paligemma_num_key_value_heads,
paligemma_head_dim=paligemma_hidden_size // paligemma_num_attention_heads,
paligemma_attention_bias=paligemma_attention_bias, # gemma default
paligemma_intermediate_size=paligemma_intermediate_size,
paligemma_hidden_act=paligemma_hidden_act,
paligemma_rms_norm_eps=paligemma_rms_norm_eps, # gemma default
expert_hidden_size=expert_hidden_size,
expert_num_attention_heads=expert_num_attention_heads,
expert_num_key_value_heads=expert_num_key_value_heads,
expert_head_dim=expert_head_dim,
expert_attention_bias=expert_attention_bias,
expert_intermediate_size=expert_intermediate_size,
expert_hidden_act=expert_hidden_act,
expert_rms_norm_eps=expert_rms_norm_eps,
rope_max_wavelength=rope_max_wavelength,
rope_max_seq_len=rope_max_seq_len,
)
for i in range(paligemma_num_hidden_layers)
]
)
# Final norms
self.norms = nn.ModuleList(
[
GemmaRMSNorm(paligemma_hidden_size, eps=1e-6),
GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
]
)
def embed_image(self, image: torch.Tensor) -> torch.Tensor:
"""Encode images with SigLIP and project to hidden size."""
image_outputs = self.vision_tower(image)
selected_image_feature = image_outputs.last_hidden_state
image_features = self.multi_modal_projector(selected_image_feature)
return image_features
def embed_language_tokens(self, tokens: torch.Tensor) -> torch.Tensor:
"""Embed token ids into continuous vectors."""
return self.embed_tokens(tokens)
def forward(
self,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[dict] = None,
        inputs_embeds: Optional[list[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
fill_kv_cache: Optional[bool] = None,
        adarms_cond: Optional[list[torch.FloatTensor]] = None,
) -> tuple[list[Optional[torch.Tensor]], dict]:
"""Run the stacked dual-stream decoder with optional caching and
AdaRMS.
Args:
attention_mask: (B, L, L) attention mask for both streams.
position_ids: (B, L) RoPE positions.
past_key_values: Optional KV cache dict to reuse.
inputs_embeds: [paligemma_embeds, expert_embeds].
use_cache: Whether to use KV cache.
fill_kv_cache: If True, populate cache from inputs.
adarms_cond: Optional per-stream modulation vectors for AdaRMS.
Returns:
(outputs_embeds, past_key_values): outputs per stream and the KV cache.
"""
inputs_embeds = [
input_embed.to(dtype=torch.bfloat16) if input_embed is not None else None for input_embed in inputs_embeds
]
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
if use_cache and past_key_values is None:
past_key_values = {}
hidden_states_list = inputs_embeds
for layer in self.layers:
# FSDP will make a copy of the "past_key_values" dictionary, which needs to be reassigned.
hidden_states_list, past_key_values = layer(
hidden_states_list,
adarms_cond=adarms_cond,
position_ids=position_ids,
attention_mask=attention_mask,
use_cache=use_cache,
past_key_values=past_key_values,
fill_kv_cache=fill_kv_cache,
)
outputs_embeds = []
for i, hidden_states in enumerate(hidden_states_list):
if hidden_states is not None:
if self.pi05_enabled and adarms_cond[i] is not None:
out_emb, _ = self.norms[i](hidden_states, adarms_cond[i])
else:
out_emb = self.norms[i](hidden_states)
outputs_embeds.append(out_emb)
else:
outputs_embeds.append(None)
return outputs_embeds, past_key_values
|
verl__experimental__vla__models__pi0_torch__model__paligemma_with_expert.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Literal
import torch
from torch import Tensor
from torch import nn
from torch.distributed.fsdp import register_fsdp_forward_method
from torch.distributions import Normal
from transformers import PreTrainedModel
from typing_extensions import override
from verl.protocol import DataProto
from verl.utils.device import get_device_name
from ...sac.base import SupportSACTraining
from ..modules.mlp import MLP
from .configuration_pi0_torch import PI0TorchConfig
from .model.modeling_pi0 import PI0Model, make_att_2d_masks
from .pi0_utils import (
ImageTransform,
Normalize,
PromptTokenizerTransform,
Unnormalize,
)
from .policy.base import Pi0Output
class PI0ForActionPrediction(PreTrainedModel, SupportSACTraining):
config_class = PI0TorchConfig
base_model_prefix = "pi0_torch"
def __init__(self, config: PI0TorchConfig):
super().__init__(config)
        self.model: PI0Model | None = None
self.state_norm_stats = config.state_norm_stats
self.action_norm_stats = config.action_norm_stats
self.pi05_enabled = config.pi05_enabled
assert self.state_norm_stats, "state_norm_stats must be provided in PI0TorchConfig"
assert self.action_norm_stats, "action_norm_stats must be provided in PI0TorchConfig"
assert isinstance(self.pi05_enabled, bool), "pi05_enabled must be provided in PI0TorchConfig"
# Input transforms
self.state_normalize_transform = Normalize(self.state_norm_stats, use_quantiles=self.pi05_enabled)
self.action_normalize_transform = Normalize(self.action_norm_stats, use_quantiles=self.pi05_enabled)
self.image_transform = ImageTransform(resize_imgs_with_padding=(224, 224), enable_image_aug=False)
max_length = 200 if self.pi05_enabled else 48
self.prompt_tokenizer_transform = PromptTokenizerTransform(max_length=max_length, discrete_state_input=False)
# Output transforms
self.state_unnormalize_transform = Unnormalize(self.state_norm_stats, use_quantiles=self.pi05_enabled)
self.action_unnormalize_transform = Unnormalize(self.action_norm_stats, use_quantiles=self.pi05_enabled)
self._to(get_device_name())
##### SAC Algorithm Support #####
if getattr(self.config, "sac_enable", False):
head_num = 2 if getattr(self.config, "double_q", True) else 1
self.critic_heads = nn.ModuleList(
[
MLP(
input_dim=2150, # 2048(prefix mean) + 32(state) + 10*7(action flat)
hidden_dims=[1024, 512, 256],
output_dim=1,
activation="relu",
init_method="normal",
)
for _ in range(head_num)
]
)
self.target_network_heads = nn.ModuleList(
[
MLP(
input_dim=2150,
hidden_dims=[1024, 512, 256],
output_dim=1,
activation="relu",
init_method="normal",
)
for _ in range(head_num)
]
)
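            # Target heads mirror the critic heads and are soft-updated via
            # sac_update_target_network (Polyak averaging).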
def _to(self, device: torch.device | str):
self.state_normalize_transform.to(device)
self.state_unnormalize_transform.to(device)
self.action_normalize_transform.to(device)
self.action_unnormalize_transform.to(device)
return self
def forward(
self,
images: list[torch.Tensor],
img_masks: list[torch.Tensor],
lang_tokens: torch.Tensor,
lang_masks: torch.Tensor,
state: torch.Tensor,
x_t: torch.Tensor,
timestep: torch.Tensor,
) -> Tensor:
"""Full forward pass for one diffusion denoising step.
Args:
images: List of image tensors, each shaped (B, C, H, W) after batching.
img_masks: List of boolean masks corresponding to images, each (B,).
lang_tokens: Language token ids (B, L).
lang_masks: Language attention mask (B, L) with True for valid tokens.
state: State tensor (B, state_dim) if pi05 is disabled else ignored.
x_t: Noisy action tokens (B, n_action_steps, action_dim).
timestep: Diffusion timestep as float tensor (B,).
Returns:
Predicted v_t with shape (B, n_action_steps, action_dim).
"""
if self.model is None:
raise RuntimeError("PI0ForActionPrediction.model is not initialized. Did from_pretrained() run?")
return self.model(
images,
img_masks,
lang_tokens,
lang_masks,
state,
x_t,
timestep,
)
@torch.no_grad()
def sample_actions(
self,
env_obs: DataProto,
tokenizer,
) -> tuple[Pi0Output, dict, dict]:
"""Run one forward pass from raw inputs to final action sequence.
Args:
env_obs: The environment observations as DataProto.
tokenizer: The tokenizer used for prompt tokenization.
Returns:
A tuple of (pi0_output, s, a):
- pi0_output: The Pi0Output containing the predicted actions.
- s: Dictionary of tensors representing the states, with keys
- "images": torch.Tensor of shape (B, n_images, C, H, W)
- "image_masks": torch.Tensor of shape (B, n_images)
- "lang_tokens": torch.Tensor of shape (B, L)
- "lang_masks": torch.Tensor of shape (B, L)
- "states": torch.Tensor of shape (B, state_dim)
- a: Dictionary of tensors representing actions, with key:
- "full_action": torch.Tensor of shape (B, action_steps, action_dim)
"""
from .policy.libero_policy import LiberoPi0Input
pi0_input = LiberoPi0Input.from_env_obs(env_obs)
# Input transforms
state = self.state_normalize_transform(pi0_input.state)
images, _ = self.image_transform.call_batch(pi0_input.images)
lang_tokens, lang_masks = self.prompt_tokenizer_transform.call_batch(
{"task": pi0_input.task, "observation.state": state}, tokenizer
)
# Inference
pred_action = self.model.sample_actions(images, pi0_input.img_masks, lang_tokens, lang_masks, state=state)
# Output transforms
# state = self.state_unnormalize_transform(state)
pred_action = self.action_unnormalize_transform(pred_action)
from .policy.libero_policy import LiberoPi0Output
pi0_output = LiberoPi0Output.from_model_output({"full_action": pred_action})
s = {
"states": state,
"images": torch.stack(images, dim=1),
"image_masks": torch.stack(pi0_input.img_masks, dim=1),
"lang_tokens": lang_tokens,
"lang_masks": lang_masks,
}
a = {
"full_action": pred_action,
}
return pi0_output, s, a
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
if config is None:
config = PI0TorchConfig.from_pretrained(pretrained_model_name_or_path)
policy = cls(config)
policy.model = PI0Model.from_pretrained(pretrained_model_name_or_path)
return policy
def freeze_vision_tower(self) -> None:
"""Freeze the vision tower parameters."""
if self.model is None:
raise RuntimeError("PI0ForActionPrediction.model is not initialized. Did from_pretrained() run?")
vision_tower = self.model.paligemma_with_expert.vision_tower
vision_tower.requires_grad_(False)
vision_tower.eval()
# --- SAC Algorithm Support ---
def _multi_heads_value(
self, value_heads: nn.ModuleList, input_tensor: torch.Tensor, method: Literal["cat", "min"] = "cat"
) -> torch.Tensor:
q_values = [head(input_tensor) for head in value_heads]
if method == "cat":
q_values = torch.cat(q_values, dim=-1)
elif method == "min":
q_values = torch.min(torch.cat(q_values, dim=-1), dim=-1).values
else:
raise ValueError(f"Unknown method: {method}")
return q_values
def _build_kv_cache_from_prefix(
self,
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
):
"""Build KV cache for prefix. No grad needed."""
prefix_embs, prefix_pad_masks, prefix_att_masks = prefix_features
prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
with torch.no_grad():
_, past_key_values = self.model.paligemma_with_expert.forward(
attention_mask=prefix_att_2d_masks,
position_ids=prefix_position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, None],
use_cache=self.model.use_cache,
fill_kv_cache=True,
adarms_cond=[None, None],
)
return past_key_values
def _get_logprobs(
self,
s: dict[str, torch.Tensor],
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
*,
x_t: torch.Tensor | None = None, # (B, T, A)
x_next: torch.Tensor | None = None, # (B, T, A)
v_t: torch.Tensor | None = None, # (B, T, A)
t: torch.Tensor | None = None, # (B,)
step_idx: torch.Tensor | None = None, # (B,)
) -> torch.Tensor:
"""
Compute log-probability of x_{t+1} given (x_t, v_t) under the Flow-SDE formulation.
See https://arxiv.org/abs/2510.25889
"""
prefix_embs, prefix_pad_masks, _ = prefix_features
states = s["states"]
B = prefix_embs.shape[0]
device = prefix_embs.device
past_key_values = self._build_kv_cache_from_prefix(prefix_features)
if x_t is None or x_next is None or v_t is None or t is None:
actions_shape = (B, self.model.n_action_steps, self.model.max_action_dim)
x = self.model.sample_noise(actions_shape, device=device)
dt = -1.0 / float(self.model.num_steps)
t_grid = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device)
x_prev, v_prev, t_prev = None, None, None
for tt in t_grid:
x_prev = x
t_prev = tt
v_prev = self.model.denoise_step(
states,
prefix_pad_masks,
past_key_values,
x,
tt.expand(B),
)
x = x + dt * v_prev
x_t = x_prev
x_next = x
v_t = v_prev
t = t_prev.expand(B)
# sigma schedule step index
K = int(self.model.num_steps)
if step_idx is None:
step_idx = torch.full((B,), K - 1, device=device, dtype=torch.long)
# one-step mean/std
dt_pos = 1.0 / float(K)
t_b = t[:, None, None] # (B,1,1)
dt_b = torch.full_like(t_b, dt_pos)
x0_pred = x_t - v_t * t_b
x1_pred = x_t + v_t * (1.0 - t_b)
# heuristic sigma schedule (ported family)
noise_level = 0.5
t_grid_full = torch.arange(1.0, -dt_pos / 2, -dt_pos, dtype=torch.float32, device=device) # len=K+1
t_for_sigma = torch.where(t_grid_full == 1.0, t_grid_full[1], t_grid_full)
sigmas = noise_level * torch.sqrt(t_grid_full / (1.0 - t_for_sigma).clamp_min(1e-6))
sigmas = sigmas[:-1] # len=K
sigma_i = sigmas[step_idx][:, None, None].clamp_min(1e-6) # (B,1,1)
x0_weight = torch.ones_like(t_b) - (t_b - dt_b)
x1_weight = t_b - dt_b - (sigma_i**2) * dt_b / (2.0 * t_b.clamp_min(1e-6))
x_next_mean = x0_pred * x0_weight + x1_pred * x1_weight
x_next_std = (dt_b.sqrt() * sigma_i).clamp_min(1e-6)
dist = Normal(x_next_mean.float(), x_next_std.float())
log_probs = dist.log_prob(x_next.float()).sum(dim=2).mean(dim=1) # (B,)
return log_probs
def _sample_actions_and_logprobs_from_prefix(
self,
states: torch.Tensor,
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
        Sample actions and compute log-probabilities aligned with those sampled actions.
Args:
states: (B, state_dim)
prefix_features: tuple of (prefix_embs, prefix_pad_masks, prefix_att_masks)
Returns:
actions: (B, n_action_steps, action_dim)
log_probs: (B,)
"""
prefix_embs, prefix_pad_masks, _ = prefix_features
B = prefix_embs.shape[0]
device = prefix_embs.device
past_key_values = self._build_kv_cache_from_prefix(prefix_features)
actions_shape = (B, self.model.n_action_steps, self.model.max_action_dim)
x = self.model.sample_noise(actions_shape, device=device)
dt = -1.0 / float(self.model.num_steps)
t_grid = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device) # len=K
x_prev, v_prev, t_prev = None, None, None
for tt in t_grid:
x_prev = x
t_prev = tt
v_prev = self.model.denoise_step(
states,
prefix_pad_masks,
past_key_values,
x,
tt.expand(B),
)
x = x + dt * v_prev
actions = x # x_K
# aligned logprob: use last transition (K-1)
step_idx = torch.full((B,), int(self.model.num_steps) - 1, device=device, dtype=torch.long)
log_probs = self._get_logprobs(
{"states": states},
prefix_features,
x_t=x_prev,
x_next=actions,
v_t=v_prev,
t=t_prev.expand(B),
step_idx=step_idx,
)
return actions, log_probs
@override
def sac_init(self):
"""Initialize SAC-related components."""
self.freeze_vision_tower()
register_fsdp_forward_method(self, "sac_forward_critic")
register_fsdp_forward_method(self, "sac_forward_actor")
register_fsdp_forward_method(self, "sac_update_target_network")
register_fsdp_forward_method(self, "sac_forward_state_features")
@override
def sac_forward_actor(
self,
state_features: tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
prefix_features, states = state_features
actions, log_probs = self._sample_actions_and_logprobs_from_prefix(states, prefix_features)
return actions, log_probs
@override
def sac_forward_critic(
self,
a: dict[str, torch.Tensor],
state_features: tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
*,
use_target_network: bool = False,
method: Literal["cat", "min"] = "cat",
requires_grad: bool = False,
):
critic_head = self.target_network_heads if use_target_network else self.critic_heads
for p in critic_head.parameters():
p.requires_grad_(requires_grad)
prefix_features, states = state_features
prefix_embs, _, _ = prefix_features
mean_prefix_embs = prefix_embs.mean(dim=1, keepdim=False) # (B, 2048)
actions = self.action_normalize_transform(a["full_action"]) # (B, 50, 32)
actions = actions[:, :10, :7] # (B, 10, 7)
flattened_actions = actions.reshape(actions.shape[0], -1) # (B, 70)
critic_input = torch.cat([mean_prefix_embs, states, flattened_actions], dim=-1) # (B, 2150)
q_values = self._multi_heads_value(critic_head, critic_input, method=method)
return q_values
@override
def sac_forward_state_features(
self, s: dict[str, torch.Tensor]
) -> tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]:
with torch.no_grad():
prefix_features = self.model.embed_prefix(
images=s["images"].unbind(dim=1),
img_masks=s["image_masks"].unbind(dim=1),
lang_tokens=s["lang_tokens"],
lang_masks=s["lang_masks"],
)
return (prefix_features, s["states"])
@override
@torch.no_grad()
def sac_update_target_network(self, tau: float):
for target_head, head in zip(self.target_network_heads, self.critic_heads, strict=False):
for target_param, param in zip(target_head.parameters(), head.parameters(), strict=False):
target_param.data.mul_(1.0 - tau).add_(param.data, alpha=tau)
|
verl__experimental__vla__models__pi0_torch__modeling_pi0_torch.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
from typing import Any
import torch
import torch.nn.functional as F
from torchvision import transforms
class Normalize:
"""Normalize robot state vectors using mean/std or quantiles.
Args:
stats: A dict containing either {'mean', 'std'} or {'q01', 'q99'}.
use_quantiles: If True, use quantile based normalization.
"""
def __init__(self, stats: dict[str, Any], *, use_quantiles: bool = False) -> None:
self.EPSILON = 1e-6
self.stats = stats
self.use_quantiles = use_quantiles
required_attrs = ["mean", "std"]
if self.use_quantiles:
required_attrs = ["q01", "q99"]
for attr in required_attrs:
if attr not in stats:
raise AttributeError(f"stats object is missing the following attribute: {attr}")
if self.use_quantiles:
self.q01 = torch.tensor(stats["q01"], dtype=torch.float32)
self.q99 = torch.tensor(stats["q99"], dtype=torch.float32)
else:
self.mean = torch.tensor(stats["mean"], dtype=torch.float32)
self.std = torch.tensor(stats["std"], dtype=torch.float32)
def to(self, device: torch.device | str) -> None:
if self.use_quantiles:
self.q01 = self.q01.to(device)
self.q99 = self.q99.to(device)
else:
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
x_dim = x.shape[-1]
if self.use_quantiles:
return (x - self.q01[..., :x_dim]) / (
self.q99[..., :x_dim] - self.q01[..., :x_dim] + self.EPSILON
) * 2.0 - 1.0
else:
return (x - self.mean[..., :x_dim]) / (self.std[..., :x_dim] + self.EPSILON)
class Unnormalize:
def __init__(self, stats, *, use_quantiles: bool = False):
self.EPSILON = 1e-6
self.stats = stats
self.use_quantiles = use_quantiles
if self.use_quantiles:
self.q01 = torch.tensor(stats["q01"], dtype=torch.float32)
self.q99 = torch.tensor(stats["q99"], dtype=torch.float32)
else:
self.mean = torch.tensor(stats["mean"], dtype=torch.float32)
self.std = torch.tensor(stats["std"], dtype=torch.float32)
def to(self, device: torch.device | str) -> None:
if self.use_quantiles:
self.q01 = self.q01.to(device)
self.q99 = self.q99.to(device)
else:
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
x_dim = x.shape[-1]
if self.use_quantiles:
return (x + 1.0) / 2.0 * (self.q99[..., :x_dim] - self.q01[..., :x_dim] + self.EPSILON) + self.q01[
..., :x_dim
]
else:
return x * (self.std[..., :x_dim] + self.EPSILON) + self.mean[..., :x_dim]
class DeltaActions:
"""Repacks absolute actions into delta action space."""
def __init__(self):
# If the robot has mobile base, masks of base action are False and it doesn't need to be specified explicitly.
self.mask = torch.tensor([True, True, True, True, True, True, False, True, True, True, True, True, True, False])
def to(self, device: torch.device | str) -> None:
self.mask = self.mask.to(device)
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
if "action" not in data or "observation.state" not in data:
return data
state, action = data["observation.state"], data["action"]
dims = self.mask.shape[-1]
action[..., :dims] -= torch.where(self.mask, state[..., :dims], torch.zeros_like(state[..., :dims])).unsqueeze(
-2
)
data["action"] = action
return data
class AbsoluteActions:
"""Repacks delta actions into absolute action space."""
def __init__(self):
# If the robot has mobile base, masks of base action are False and it doesn't need to be specified explicitly.
self.mask = torch.tensor([True, True, True, True, True, True, False, True, True, True, True, True, True, False])
def to(self, device: torch.device | str) -> None:
self.mask = self.mask.to(device)
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
if "action" not in data or "observation.state" not in data:
return data
state, action = data["observation.state"], data["action"]
dims = self.mask.shape[-1]
action[..., :dims] += torch.where(self.mask, state[..., :dims], torch.zeros_like(state[..., :dims])).unsqueeze(
-2
)
data["action"] = action
return data
class AlohaInputs:
"""Inputs for the Aloha policy."""
def __init__(self, adapt_to_pi: bool = True) -> None:
self.joint_flip_mask = torch.tensor([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1])
self.adapt_to_pi = adapt_to_pi
def to(self, device: torch.device | str) -> None:
self.joint_flip_mask = self.joint_flip_mask.to(device)
def _gripper_from_angular_inv(self, value: torch.Tensor) -> torch.Tensor:
# Directly inverts the gripper_from_angular function.
value = _unnormalize(value, min_val=-0.6213, max_val=1.4910)
return value - 0.5476
def _gripper_to_angular(self, value: torch.Tensor) -> torch.Tensor:
# Aloha transforms the gripper positions into a linear space. The following code
# reverses this transformation to be consistent with pi0 which is pretrained in
# angular space.
#
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
value = _unnormalize(value, min_val=0.01844, max_val=0.05800)
# This is the inverse of the angular to linear transformation inside the Interbotix code.
def linear_to_radian(linear_position, arm_length, horn_radius):
value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
return torch.arcsin(torch.clip(value, -1.0, 1.0))
# The constants are taken from the Interbotix code.
value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
# pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110).
# There are 4096 total encoder counts and aloha uses a zero of 2048.
# Converting this to radians means that the normalized inputs are between (0.5476, 1.6296)
return _normalize(value, min_val=0.5476, max_val=1.6296)
def _encode_actions_inv(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
actions[:, :14] = self.joint_flip_mask * actions[:, :14]
actions[:, [6, 13]] = self._gripper_from_angular_inv(actions[:, [6, 13]])
return actions
def _decode_state(self, state: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
# Flip the joints.
state[:14] = self.joint_flip_mask * state[:14]
# Reverse the gripper transformation that is being applied by the Aloha runtime.
state[[6, 13]] = self._gripper_to_angular(state[[6, 13]])
return state
def _decode_aloha(self, state: torch.Tensor) -> torch.Tensor:
# state is [left_arm_joint_angles, left_arm_gripper, right_arm_joint_angles, right_arm_gripper]
# dim sizes: [6, 1, 6, 1]
state = self._decode_state(state)
return state
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
"""Decode Aloha-specific input formats into the pi0 training/runtime
format."""
state = self._decode_aloha(data["observation.state"])
data["observation.state"] = state
# Actions are only available during training.
if "action" in data:
actions = data["action"]
actions = self._encode_actions_inv(actions)
data["action"] = actions
return data
# VeRL: Batch Inference
def _encode_actions_inv_batch(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
actions[..., :14] = self.joint_flip_mask * actions[..., :14]
actions[..., [6, 13]] = self._gripper_from_angular_inv(actions[..., [6, 13]])
return actions
def _decode_state_batch(self, state: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
state[..., :14] = self.joint_flip_mask * state[..., :14]
state[..., [6, 13]] = self._gripper_to_angular(state[..., [6, 13]])
return state
def call_batch(self, data: dict[str, Any]) -> dict[str, Any]:
state = self._decode_state_batch(data["observation.state"])
data["observation.state"] = state
if "action" in data:
actions = data["action"]
actions = self._encode_actions_inv_batch(actions)
data["action"] = actions
return data
class AlohaOutputs:
"""Outputs for the Aloha policy."""
def __init__(self, original_action_dim: int, adapt_to_pi: bool = True):
"""
Args:
            original_action_dim: int. The original action dimension of the policy. A dual-arm robot has 14 dims and a
                mobile dual-arm robot has 16 dims.
adapt_to_pi: bool. If true, this will convert the joint and gripper values from the standard Aloha space to
the space used by the pi internal runtime which was used to train the base model.
"""
self.joint_flip_mask = torch.tensor([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1])
self.original_action_dim = original_action_dim
self.adapt_to_pi = adapt_to_pi
def to(self, device: torch.device | str) -> None:
self.joint_flip_mask = self.joint_flip_mask.to(device)
def _gripper_from_angular(self, value: torch.Tensor) -> torch.Tensor:
# Convert from the gripper position used by pi0 to the gripper position that is used by Aloha.
# Note that the units are still angular but the range is different.
# We do not scale the output since the trossen model predictions are already in radians.
# See the comment in _gripper_to_angular for a derivation of the constant
value = value + 0.5476
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
return _normalize(value, min_val=-0.6213, max_val=1.4910)
def _encode_actions(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
# Flip the joints.
actions[:, :14] = self.joint_flip_mask * actions[:, :14]
actions[:, [6, 13]] = self._gripper_from_angular(actions[:, [6, 13]])
return actions
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
actions = data["action"][:, : self.original_action_dim]
return {"action": self._encode_actions(actions)}
# VeRL: Batch Inference
def _encode_actions_batch(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
actions[..., :14] = self.joint_flip_mask * actions[..., :14]
actions[..., [6, 13]] = self._gripper_from_angular(actions[..., [6, 13]])
return actions
def call_batch(self, data: dict[str, Any]) -> dict[str, Any]:
actions = data["action"][..., : self.original_action_dim]
return {"action": self._encode_actions_batch(actions)}
class PadStatesAndActions:
"""Zero-pads states and actions to the model action dimension."""
def __init__(self, action_dim: int) -> None:
self.action_dim = action_dim
def _pad_to_dim(self, x: torch.Tensor, target_dim: int, axis: int = -1) -> torch.Tensor:
"""Pad an array to the target dimension with zeros along the specified
axis."""
current_dim = x.shape[axis]
if current_dim < target_dim:
shape = list(x.shape)
shape[-1] = target_dim
new_vector = torch.zeros(*shape, dtype=x.dtype, device=x.device)
new_vector[..., :current_dim] = x
x = new_vector
return x
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
data["observation.state"] = self._pad_to_dim(data["observation.state"], self.action_dim, axis=-1)
if "action" in data:
data["action"] = self._pad_to_dim(data["action"], self.action_dim, axis=-1)
return data
def _normalize(x: torch.Tensor, min_val: float, max_val: float) -> torch.Tensor:
return (x - min_val) / (max_val - min_val)
def _unnormalize(x: torch.Tensor, min_val: float, max_val: float) -> torch.Tensor:
return x * (max_val - min_val) + min_val
def resize_with_pad(img: torch.Tensor, width: int, height: int, pad_value: float = -1.0) -> torch.Tensor:
"""Resize an image to fit inside the given (width, height) while preserving
aspect ratio, then pad with the specified value so that the final image
exactly matches the target size.
Args:
img: Input image, shape (C, H, W), with values typically in [0, 1].
width: Target width (W).
height: Target height (H).
pad_value: Value to use for padding, defaults to -1.
Returns:
A torch.Tensor of shape (C, height, width).
"""
# Validate input dimensions
if img.ndim != 3:
raise ValueError(f"(C,H,W) expected, but got {img.shape}")
cur_height, cur_width = img.shape[1:]
ratio = max(cur_width / width, cur_height / height)
resized_height = int(cur_height / ratio)
resized_width = int(cur_width / ratio)
resized_img = F.interpolate(
img.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False
).squeeze(0)
pad_height = max(0, int(height - resized_height))
pad_width = max(0, int(width - resized_width))
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
padded_img = F.pad(resized_img, (pad_left, pad_right, pad_top, pad_bottom), value=pad_value)
    return padded_img
class ImageTransform:
def __init__(
self,
resize_imgs_with_padding: tuple[int, int],
present_img_keys: list[str] | None = None,
enable_image_aug: bool = False,
) -> None:
self.resize_imgs_with_padding = resize_imgs_with_padding
self.present_img_keys = present_img_keys
if self.present_img_keys is None:
self.present_img_keys = [
"observation.images.cam_high",
"observation.images.cam_left_wrist",
"observation.images.cam_right_wrist",
]
self.enable_image_aug = enable_image_aug
self.width, self.height = resize_imgs_with_padding
if self.enable_image_aug:
self.color_jitter_transform = transforms.ColorJitter(
brightness=0.3,
contrast=0.4,
saturation=0.5,
)
self.pose_transform = transforms.Compose(
[
                    transforms.RandomCrop((int(self.height * 0.95), int(self.width * 0.95))),
transforms.Resize((self.width, self.height)),
transforms.RandomRotation((-5, 5)),
]
)
def __call__(self, data: dict[str, torch.Tensor]) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
"""Preprocesses input images: optionally scales and pads to a fixed size,
then maps the pixel range from [0,1] to [-1,1].
Returns two lists:
images: The processed image arrays (C, H, W).
img_masks: A list of boolean masks of the same length as images, currently fixed to True.
"""
images = []
img_masks = []
for key in self.present_img_keys:
if key not in data:
raise ValueError(
f"{key} not found in data. Please check the present_img_keys in the config or the dataset."
)
img = data[key]
# [C, H, W] -> preprocess
if self.resize_imgs_with_padding is not None:
original_height, original_width = img.shape[1:]
target_height, target_width = self.resize_imgs_with_padding
if original_height != target_height or original_width != target_width:
img = resize_with_pad(img, *self.resize_imgs_with_padding, pad_value=0)
if self.enable_image_aug:
if "wrist" not in key:
img = self.pose_transform(img)
img = self.color_jitter_transform(img)
# Normalize pixel values to [-1, 1]
img = img * 2.0 - 1.0
images.append(img)
img_masks.append(torch.tensor(True, dtype=torch.bool, device=img.device))
return images, img_masks
# VeRL: Batch Inference
def call_batch(self, data: dict[str, torch.Tensor]) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
images = []
img_masks = []
for key in self.present_img_keys:
if key not in data:
raise ValueError(
f"{key} not found in data. Please check the present_img_keys in the config or the dataset."
)
img = data[key]
if img.ndim != 4:
raise ValueError(f"(B,C,H,W) expected, but got {img.shape}")
if self.resize_imgs_with_padding is not None:
original_height, original_width = img.shape[2:]
target_height, target_width = self.resize_imgs_with_padding
if original_height != target_height or original_width != target_width:
ratio = max(original_width / target_width, original_height / target_height)
resized_height = int(original_height / ratio)
resized_width = int(original_width / ratio)
img = F.interpolate(img, size=(resized_height, resized_width), mode="bilinear", align_corners=False)
pad_height = max(0, int(target_height - resized_height))
pad_width = max(0, int(target_width - resized_width))
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
img = F.pad(img, (pad_left, pad_right, pad_top, pad_bottom), value=0)
if self.enable_image_aug:
imgs = []
for sample in img:
if "wrist" not in key:
sample = self.pose_transform(sample)
sample = self.color_jitter_transform(sample)
imgs.append(sample)
img = torch.stack(imgs, dim=0)
            # pi05 libero: batched inputs are expected in [0, 255], unlike __call__ which assumes [0, 1]
            img = img / 255.0 * 2.0 - 1.0
images.append(img)
img_masks.append(torch.ones((img.shape[0],), dtype=torch.bool, device=img.device))
return images, img_masks
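# Editorial example (hypothetical, not part of the original module): a minimal sketch of applying
# ImageTransform to a single observation dict; the dummy shapes and key names are illustrative only.
def _example_image_transform() -> None:
    import torch
    transform = ImageTransform(resize_imgs_with_padding=(224, 224))
    dummy_obs = {key: torch.rand(3, 256, 320) for key in transform.present_img_keys}
    images, img_masks = transform(dummy_obs)
    # Each image is resized with padding to the target size and mapped to [-1, 1];
    # each mask is a scalar bool tensor fixed to True.
    assert len(images) == len(img_masks) == len(transform.present_img_keys)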
class PromptTokenizerTransform:
def __init__(self, max_length: int, discrete_state_input: bool = False) -> None:
# self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_path)
self.tokenizer_max_length = max_length
self.discrete_state_input = discrete_state_input
def __call__(self, data: dict[str, Any], tokenizer) -> tuple[torch.Tensor, torch.Tensor]:
"""Tokenize the text input.
Args:
data: Dict containing 'task' string and optionally 'observation.state' tensor to infer device.
Returns:
A tuple of (lang_tokens, lang_masks), both as torch tensors on the inferred device.
"""
task = data["task"].strip().replace("_", " ").replace("\n", " ")
# Infer device from observation.state if available
device = data["observation.state"].device if "observation.state" in data else torch.device("cpu")
if self.discrete_state_input:
assert "observation.state" in data, "discrete_state_input is True, but observation.state is not found."
discretized_state = (
torch.bucketize(data["observation.state"], torch.linspace(-1, 1, 256 + 1, device=device)[:-1]) - 1
)
state_values = " ".join([str(int(x)) for x in discretized_state.tolist()])
task = f"Task: {task}, State: {state_values};\nAction: "
else:
# PaliGemma prompt has to end with a new line in Pi0
task = f"{task}\n"
tokenized_prompt = tokenizer(
task,
padding="max_length",
padding_side="right",
max_length=self.tokenizer_max_length,
return_tensors="pt",
)
lang_tokens = tokenized_prompt["input_ids"][0].to(dtype=torch.int32, device=device)
lang_masks = tokenized_prompt["attention_mask"][0].to(dtype=torch.bool, device=device)
return lang_tokens, lang_masks
# VeRL: Batch Inference
def call_batch(self, data: dict[str, Any], tokenizer) -> tuple[torch.Tensor, torch.Tensor]:
task = data["task"]
if hasattr(task, "tolist") and not isinstance(task, str):
tasks = task.tolist()
else:
tasks = list(task)
tasks = [str(t).strip().replace("_", " ").replace("\n", " ") for t in tasks]
device = data["observation.state"].device if "observation.state" in data else torch.device("cpu")
if self.discrete_state_input:
assert "observation.state" in data, "discrete_state_input is True, but observation.state is not found."
state = data["observation.state"]
discretized_state = torch.bucketize(state, torch.linspace(-1, 1, 256 + 1, device=device)[:-1]) - 1
state_values = [" ".join([str(int(x)) for x in row.tolist()]) for row in discretized_state]
tasks = [
f"Task: {task_item}, State: {state_value};\nAction: "
for task_item, state_value in zip(tasks, state_values, strict=False)
]
else:
tasks = [f"{task_item}\n" for task_item in tasks]
tokenized_prompt = tokenizer(
tasks,
padding="max_length",
padding_side="right",
max_length=self.tokenizer_max_length,
return_tensors="pt",
)
lang_tokens = tokenized_prompt["input_ids"].to(dtype=torch.int32, device=device)
lang_masks = tokenized_prompt["attention_mask"].to(dtype=torch.bool, device=device)
return lang_tokens, lang_masks
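# Editorial example (hypothetical, not part of the original module): tokenizing a single task string.
# The tokenizer path below is only a placeholder; any HF tokenizer with a pad token works.
def _example_prompt_tokenizer_transform() -> None:
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("<path-to-paligemma-tokenizer>")
    transform = PromptTokenizerTransform(max_length=48)
    lang_tokens, lang_masks = transform({"task": "pick up the red block"}, tokenizer)
    # lang_tokens: (48,) int32 ids, right-padded; lang_masks: (48,) bool attention mask
    assert lang_tokens.shape == (48,) and lang_masks.dtype == torch.bool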
|
verl__experimental__vla__models__pi0_torch__pi0_utils.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import torch
class Pi0Input(ABC):
def __init__(self):
# three images for pi0 input with keys:
# [
# 'observation.images.cam_high',
# 'observation.images.cam_left_wrist',
# 'observation.images.cam_right_wrist',
# ],
# each with shape (B, C, H, W)
self.images: dict[str, torch.Tensor] = {}
# image masks corresponding to the images, each with shape (B,)
self.img_masks: list[torch.Tensor] = []
# task description as a list of strings
self.task: list[str] = []
# robot state with shape (B, state_dim)
        self.state: torch.Tensor | None = None
@classmethod
@abstractmethod
def from_env_obs(cls, env_obs) -> "Pi0Input": ...
class Pi0Output(ABC):
    def __init__(self):
        self.action: torch.Tensor | None = None
@classmethod
@abstractmethod
def from_model_output(cls, model_output) -> "Pi0Output": ...
|
verl__experimental__vla__models__pi0_torch__policy__base.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing_extensions import override
from verl.protocol import DataProto
from .base import Pi0Input, Pi0Output
PI0_MAX_STATE_DIM = 32
PI0_ACTION_CHUNK_SIZE = 10
LIBERO_ACTION_DIM = 7
class LiberoPi0Input(Pi0Input):
@override
@classmethod
def from_env_obs(cls, env_obs: DataProto) -> "LiberoPi0Input":
input = cls()
# Process images
images = env_obs.batch["full_image"]
wrist_images = env_obs.batch["wrist_image"]
batch_size = images.shape[0]
cam_high = images.permute(0, 3, 1, 2)
left_wrist = wrist_images.permute(0, 3, 1, 2) # (B, H, W, C) -> (B, C, H, W)
empty_images = torch.zeros(
(batch_size, 3, cam_high.shape[2], cam_high.shape[3]),
device=env_obs.batch.device,
dtype=torch.bfloat16,
)
input.images = {
"observation.images.cam_high": cam_high.to(torch.bfloat16),
"observation.images.cam_left_wrist": left_wrist.to(torch.bfloat16),
"observation.images.cam_right_wrist": empty_images,
}
input.img_masks = [
torch.ones((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
torch.ones((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
torch.zeros((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
]
# Process other data
input.task = list(env_obs.non_tensor_batch["task_descriptions"])
state = env_obs.batch["state"]
input.state = torch.nn.functional.pad(
state, (0, max(0, PI0_MAX_STATE_DIM - state.shape[-1])), "constant", 0
).to(env_obs.batch.device, dtype=torch.float32)
return input
class LiberoPi0Output(Pi0Output):
@override
@classmethod
def from_model_output(cls, model_output: dict) -> "LiberoPi0Output":
output = cls()
output.action = model_output["full_action"][:, :PI0_ACTION_CHUNK_SIZE, :LIBERO_ACTION_DIM]
return output
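# Editorial example (hypothetical, not part of the original module): building a LiberoPi0Input from a
# minimal fake observation batch; the tensor shapes below are illustrative only.
def _example_libero_pi0_input() -> None:
    import numpy as np
    obs = DataProto.from_dict(
        tensors={
            "full_image": torch.zeros(2, 224, 224, 3),
            "wrist_image": torch.zeros(2, 224, 224, 3),
            "state": torch.zeros(2, 8),
        },
        non_tensors={"task_descriptions": np.array(["pick up the bowl", "open the drawer"], dtype=object)},
    )
    pi0_input = LiberoPi0Input.from_env_obs(obs)
    # The 8-dim state is zero-padded to PI0_MAX_STATE_DIM and the unused right-wrist camera is masked out.
    assert pi0_input.state.shape == (2, PI0_MAX_STATE_DIM)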
|
verl__experimental__vla__models__pi0_torch__policy__libero_policy.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility helpers to register custom VLA models with Hugging Face Auto classes."""
from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor
from .openvla_oft.configuration_prismatic import OpenVLAConfig
from .openvla_oft.modeling_prismatic import OpenVLAForActionPrediction
from .openvla_oft.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
from .pi0_torch import PI0ForActionPrediction, PI0TorchConfig
_REGISTERED_MODELS = {
"openvla_oft": False,
"pi0_torch": False,
}
def register_openvla_oft() -> None:
"""Register the OpenVLA OFT model and processors."""
if _REGISTERED_MODELS["openvla_oft"]:
return
AutoConfig.register("openvla", OpenVLAConfig)
AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor)
AutoProcessor.register(OpenVLAConfig, PrismaticProcessor)
AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction)
_REGISTERED_MODELS["openvla_oft"] = True
def register_pi0_torch_model() -> None:
"""Register the PI0 wrapper with the HF auto classes."""
if _REGISTERED_MODELS["pi0_torch"]:
return
AutoConfig.register("pi0_torch", PI0TorchConfig)
AutoModelForVision2Seq.register(PI0TorchConfig, PI0ForActionPrediction)
_REGISTERED_MODELS["pi0_torch"] = True
def register_vla_models() -> None:
"""Register all custom VLA models with Hugging Face."""
register_openvla_oft()
register_pi0_torch_model()
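# Editorial example (hypothetical, not part of the original module): registration must happen before the
# Auto classes are asked to load a custom VLA checkpoint; the path below is only a placeholder.
def _example_register_and_load() -> None:
    register_vla_models()
    model = AutoModelForVision2Seq.from_pretrained("<path-to-openvla-oft-checkpoint>", trust_remote_code=True)
    processor = AutoProcessor.from_pretrained("<path-to-openvla-oft-checkpoint>", trust_remote_code=True)
    print(type(model).__name__, type(processor).__name__)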
|
verl__experimental__vla__models__register_vla_models.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In single GPU rollout, the sequences are generated directly by sampling from the model.
The output will contain
1. output_ids
2. attention_masks (left padding)
3. eos_masks
4. log_probs
"""
import json
import logging
import os
import torch
from PIL import Image
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn.utils.rnn import pad_sequence
from verl import DataProto
from verl.experimental.vla.envs.action_utils import center_crop_image, resize_image
from verl.experimental.vla.models.openvla_oft.modeling_prismatic import OpenVLAForActionPrediction
from verl.experimental.vla.models.openvla_oft.processing_prismatic import PrismaticProcessor
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.workers.rollout.base import BaseRollout
logger = logging.getLogger(__name__)
__all__ = ["NaiveRolloutRob"]
def pad_sequence_to_length(tensors, max_seq_len, pad_token_id, left_pad=False):
"""
pad a 2D tensors (e.g. responses, logprobs) in the last dim to max_seq_length.
input shape: [bs, seq_length]
output shape: [bs, max_seq_length]
(0, max_seq_len - tensors.shape[-1]) means right pad to max_seq_length and no left pad
"""
if tensors.shape[-1] >= max_seq_len:
return tensors
pad_tuple = (max_seq_len - tensors.shape[-1], 0) if left_pad else (0, max_seq_len - tensors.shape[-1])
return torch.nn.functional.pad(tensors, pad_tuple, "constant", pad_token_id)
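# Editorial example (not part of the original module): left vs. right padding of a (1, 3) tensor to length 5.
def _example_pad_sequence_to_length() -> None:
    x = torch.tensor([[7, 8, 9]])
    assert pad_sequence_to_length(x, 5, 0, left_pad=True).tolist() == [[0, 0, 7, 8, 9]]
    assert pad_sequence_to_length(x, 5, 0, left_pad=False).tolist() == [[7, 8, 9, 0, 0]]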
def process_input(task_descriptions, images_and_states, processor):
batchdata = {"input_ids": [], "attention_mask": [], "pixel_values": []}
for i in range(len(task_descriptions)):
task_description = task_descriptions[i]
image = resize_image(images_and_states["full_image"][i].cpu().numpy(), (224, 224))
image = Image.fromarray(image).convert("RGB")
image = center_crop_image(image)
prompt = f"In: What action should the robot take to {task_description.lower()}?\nOut:"
batch_feature = processor(prompt, image)
input_ids = batch_feature["input_ids"]
attention_mask = batch_feature["attention_mask"]
pixel_values = batch_feature["pixel_values"]
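        # Llama tokenizer id 29871 is the SentencePiece blank token ("▁"); OpenVLA expects prompts to end
        # with it before action tokens are generated, so it is appended here when missing.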
if not torch.all(input_ids[:, -1] == 29871):
input_ids = torch.cat(
(input_ids, torch.unsqueeze(torch.Tensor([29871]).long(), dim=0).to(input_ids.device)), dim=1
)
attention_mask = torch.cat(
(attention_mask, torch.unsqueeze(torch.Tensor([True]).bool(), dim=0).to(attention_mask.device)), dim=1
)
batchdata["input_ids"].append(input_ids)
batchdata["attention_mask"].append(attention_mask)
batchdata["pixel_values"].append(pixel_values)
device = get_device_id()
batchdata["input_ids"] = [x.transpose(0, 1) for x in batchdata["input_ids"]]
batchdata["attention_mask"] = [x.transpose(0, 1) for x in batchdata["attention_mask"]]
batchdata["input_ids"] = (
pad_sequence(batchdata["input_ids"], batch_first=True, padding_value=processor.tokenizer.pad_token_id)
.squeeze(-1)
.to(device)
)
batchdata["attention_mask"] = (
pad_sequence(batchdata["attention_mask"], batch_first=True, padding_value=0).squeeze(-1).to(device)
)
padding_mask = batchdata["input_ids"].ne(processor.tokenizer.pad_token_id)
assert torch.all(padding_mask == batchdata["attention_mask"].ne(0))
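    # Convert the right padding produced by pad_sequence into left padding: a stable descending argsort on
    # the integer pad mask moves all pad positions to the front of each row while preserving token order.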
padding_mask = ~padding_mask
padding_mask = padding_mask.int()
sorted_indices = torch.argsort(padding_mask, dim=1, descending=True, stable=True)
batchdata["input_ids"] = torch.gather(batchdata["input_ids"], 1, sorted_indices)
batchdata["attention_mask"] = torch.gather(batchdata["attention_mask"], 1, sorted_indices)
batchdata["pixel_values"] = torch.cat(batchdata["pixel_values"], dim=0).to(device)
assert torch.all(batchdata["attention_mask"].ne(0) == batchdata["input_ids"].ne(processor.tokenizer.pad_token_id))
return batchdata
class NaiveRolloutRob(BaseRollout):
def __init__(
self,
model_config: dict,
module: torch.nn.Module = None,
):
self.model_config = model_config
if module is not None:
self.module = module
else:
self.module = OpenVLAForActionPrediction.from_pretrained(model_config["path"], trust_remote_code=True)
self.module.vision_backbone.set_num_images_in_input(1)
self.processor = PrismaticProcessor.from_pretrained(model_config["path"], trust_remote_code=True)
dataset_statistics_path = os.path.join(model_config["path"], "dataset_statistics.json")
if os.path.isfile(dataset_statistics_path):
with open(dataset_statistics_path) as f:
norm_stats = json.load(f)
if isinstance(self.module, FSDP):
self.module.module.norm_stats = norm_stats
else:
self.module.norm_stats = norm_stats
self.module.eval()
@torch.no_grad()
def _generate_one_step(self, prompts: dict, do_sample, temperature, max_prompt_length):
idx = prompts["input_ids"] # (bs, prompt_length)
attention_mask = prompts["attention_mask"] # left-padded attention_mask
pixel_values = prompts["pixel_values"]
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
actions, response = self.module.generate_action_verl(
input_ids=idx,
pixel_values=pixel_values,
attention_mask=attention_mask,
padding_idx=self.processor.tokenizer.pad_token_id,
do_sample=do_sample,
unnorm_key="libero_10_no_noops",
temperature=temperature,
)
assert self.processor.tokenizer.pad_token_id is not None
assert idx.ndim == 2
idx = pad_sequence_to_length(
idx, max_seq_len=max_prompt_length, pad_token_id=self.processor.tokenizer.pad_token_id, left_pad=True
)
assert attention_mask.ndim == 2
attention_mask = pad_sequence_to_length(
attention_mask, max_seq_len=max_prompt_length, pad_token_id=0, left_pad=True
)
device_type = get_device_name()
assert idx.device.type == device_type
assert response.device.type == device_type
assert attention_mask.device.type == device_type
assert pixel_values.device.type == device_type
batch = {
"responses": response,
"input_ids": idx,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"action": actions,
}
return batch
# @conditional_profiler(name="generate_sequences", path="traces/rollout", max_steps=5)
@torch.no_grad()
def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Generate sequences"""
        # sampling args can be overridden by the inputs' meta_info
do_sample = prompts.meta_info["do_sample"]
temperature = prompts.meta_info["temperature"]
max_prompt_length = prompts.meta_info["prompt_length"]
# TODO: split into micro-batches
task_descriptions = prompts.non_tensor_batch["task_descriptions"]
images_and_states = {"full_image": prompts.batch["full_image"]}
vla_input = process_input(task_descriptions, images_and_states, self.processor)
vla_output = self._generate_one_step(vla_input, do_sample, temperature, max_prompt_length)
# batch = TensorDict(vla_output)
batch = DataProto.from_dict(tensors=vla_output)
return batch
async def update_weights(self, weights_iterator, **kwargs):
prefix = "_fsdp_wrapped_module."
target_state_dict = self.module.state_dict()
loaded_tensors_count = 0
for name, param in weights_iterator:
cleaned_name = name.replace(prefix, "")
if cleaned_name in target_state_dict:
target_tensor = target_state_dict[cleaned_name]
try:
target_tensor.copy_(param, non_blocking=True)
loaded_tensors_count += 1
except Exception as e:
logger.warning(f"Warning: Failed to copy tensor '{cleaned_name}'. Error: {e}")
else:
logger.warning(f"Warning: Failed to copy tensor '{cleaned_name}'. Model has no such key.")
logger.info(f"Rollout model weights updated. Loaded {loaded_tensors_count} tensors one by one.")
async def release(self):
if self.module.device.type == get_device_name():
logger.info("Releasing rollout model to CPU.")
self.module.cpu()
self.device = torch.device("cpu")
get_torch_device().empty_cache()
async def resume(self, **kwargs):
if self.module.device.type == "cpu":
target_device = get_device_name()
logger.info(f"Resuming rollout model to device: {target_device}.")
self.module.to(target_device)
self.device = torch.device(target_device)
|
verl__experimental__vla__naive_rollout_rob.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the Geometry3k dataset to parquet format
"""
import argparse
import os
import random
import numpy as np
import torch
from datasets import Dataset
from libero.libero import get_libero_path
from libero.libero.benchmark import Benchmark, get_benchmark
def patched_get_task_init_states(self, i):
init_states_path = os.path.join(
get_libero_path("init_states"),
self.tasks[i].problem_folder,
self.tasks[i].init_states_file,
)
init_states = torch.load(init_states_path, weights_only=False)
return init_states
Benchmark.get_task_init_states = patched_get_task_init_states
def compute_total_num_group_envs(task_suite: Benchmark):
total_num_group_envs = 0
trial_id_bins = []
for task_id in range(task_suite.get_num_tasks()):
task_num_trials = len(task_suite.get_task_init_states(task_id))
trial_id_bins.append(task_num_trials)
total_num_group_envs += task_num_trials
cumsum_trial_id_bins = np.cumsum(trial_id_bins)
return total_num_group_envs, cumsum_trial_id_bins
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task_suite_name", default="libero_10")
parser.add_argument(
"--local_save_dir", default="~/data/libero_rl", help="The save directory for the preprocessed dataset."
)
args = parser.parse_args()
random.seed(42)
np.random.seed(42)
    task_suite = get_benchmark(args.task_suite_name)()
total_num_group_envs, cumsum_trial_id_bins = compute_total_num_group_envs(task_suite)
print(f"Total number of group envs: {total_num_group_envs}")
print(f"Cumsum trial id bins: {cumsum_trial_id_bins}")
# Total number of group envs: 500
# Cumsum trial id bins: [ 50 100 150 200 250 300 350 400 450 500]
def get_state_ids_for_task(task_id):
start_id = 0 if task_id == 0 else cumsum_trial_id_bins[task_id - 1]
end_id = cumsum_trial_id_bins[task_id]
return list(range(start_id, end_id))
all_task_ids = list(range(task_suite.get_num_tasks()))
train_task_ids = sorted(random.sample(all_task_ids, 9))
ood_test_task_id = list(set[int](all_task_ids) - set(train_task_ids))[0] # for OOD test
print("\n[Data Split Plan]")
print(f"Training Task IDs: {train_task_ids}")
print(f"OOD Test Task ID: {ood_test_task_id}")
    train_metadata = []
    test_metadata = []
    for task_id in train_task_ids:
        all_trials = get_state_ids_for_task(task_id)
        random.shuffle(all_trials)
        # first 40 shuffled trials go to training; the remainder is held out for in-distribution testing,
        # taken from the same shuffle so train and ID-test trials never overlap
        selected_train_trials = all_trials[:40]
        for state_id in selected_train_trials:
            train_metadata.append({"task_id": task_id, "state_id": state_id, "data_source": "train"})
        # ID (in-distribution) test
        selected_id_test_trials = all_trials[40:]
        for state_id in selected_id_test_trials[:10]:
            test_metadata.append({"task_id": task_id, "state_id": state_id, "data_source": "test_in_distribution"})
# OOD
ood_all_trials = get_state_ids_for_task(ood_test_task_id)
random.shuffle(ood_all_trials)
selected_ood_trials = ood_all_trials[:20]
for state_id in selected_ood_trials:
test_metadata.append(
{"task_id": ood_test_task_id, "state_id": state_id, "data_source": "test_out_of_distribution"}
)
print(f"Generated {len(train_metadata)} training samples.")
print(f"Generated {len(test_metadata)} testing samples.")
print("-" * 20)
train_ds_meta = Dataset.from_list(train_metadata)
test_ds_meta = Dataset.from_list(test_metadata)
def map_and_process(example, idx):
task_id = example["task_id"]
state_id = example["state_id"]
data_source = example["data_source"]
split = "train" if data_source == "train" else "test"
task = task_suite.get_task(task_id)
# demonstration = task.get_demonstration(state_id)
data = {
"data_source": data_source,
"prompt": task.language,
"state_ids": state_id,
"task_ids": task_id,
"ability": "robot",
"extra_info": {
"split": split,
"state_ids": state_id,
"index": idx,
"task": task,
"task_ids": task_id,
},
}
return data
print("Mapping and processing training dataset...")
train_dataset = train_ds_meta.map(map_and_process, with_indices=True, num_proc=8)
print("Mapping and processing test dataset...")
test_dataset = test_ds_meta.map(map_and_process, with_indices=True, num_proc=8)
local_save_dir = os.path.expanduser(args.local_save_dir)
os.makedirs(local_save_dir, exist_ok=True)
print(f"Saving training dataset to {os.path.join(local_save_dir, 'train.parquet')}")
train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
print(f"Saving test dataset to {os.path.join(local_save_dir, 'test.parquet')}")
test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))
print("\nDataset generation complete!")
print("\n--- Verification ---")
print("Train dataset data sources:", train_dataset.unique("data_source"))
print("Test dataset data sources:", test_dataset.unique("data_source"))
print("Train dataset length:", len(train_dataset))
print("Test dataset length:", len(test_dataset))
|
verl__experimental__vla__prepare_libero_dataset.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agonistic model initialization with huggingface
"""
import asyncio
import uuid
from collections import defaultdict
from pprint import pprint
import numpy as np
import torch
from omegaconf import OmegaConf
from tqdm import tqdm
from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.ray import RayClassWithInitArgs
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.core_algos import agg_loss
from verl.trainer.ppo.metric_utils import (
compute_data_metrics,
compute_throughout_metrics,
compute_timing_metrics,
process_validation_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer, apply_kl_penalty, compute_advantage
from verl.trainer.ppo.reward import compute_reward
from verl.trainer.ppo.utils import Role
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
def compute_response_mask(config, data: DataProto) -> torch.Tensor:
"""Compute the attention mask for the response part of the sequence.
This function extracts the portion of the attention mask that corresponds to the model's response,
which is used for masking computations that should only apply to response tokens.
Args:
data (DataProto): The data containing batched model outputs and inputs.
Returns:
torch.Tensor: The attention mask for the response tokens.
"""
complete = data.batch["complete"] # shape: [batch_size, num_steps, chunk_size]
complete_traj = complete.view(complete.shape[0], -1) # # shape: [batch_size, num_steps * chunk_size]
batch_size, action_steps = complete_traj.shape
step_indices = torch.arange(action_steps, device=complete.device).unsqueeze(0).expand(batch_size, -1)
first_true_idx_approx = torch.argmax(complete_traj.long(), dim=1)
has_any_true = complete_traj.any(dim=1)
final_first_true_idx = torch.where(
has_any_true, first_true_idx_approx, torch.tensor(action_steps - 1, device=complete.device)
)
mask_traj = step_indices <= final_first_true_idx.unsqueeze(1)
mask = mask_traj.view(complete.shape) # shape: [batch_size, num_steps, chunk_size]
    mask = mask.repeat_interleave(config.env.actor.model.action_dim, dim=-1)  # expand to the action dim
return mask
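# Editorial example (hypothetical, not part of the original module): a single 3-step trajectory with
# chunk size 2 and action_dim 1 that first completes at flattened step index 2.
def _example_compute_response_mask() -> None:
    from types import SimpleNamespace
    complete = torch.tensor([[[False, False], [True, False], [False, False]]])
    data = DataProto.from_dict(tensors={"complete": complete})
    cfg = SimpleNamespace(env=SimpleNamespace(actor=SimpleNamespace(model=SimpleNamespace(action_dim=1))))
    mask = compute_response_mask(cfg, data)
    # Steps up to and including the first completed one are kept.
    assert mask.tolist() == [[[True, True], [True, False], [False, False]]]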
def flatten_trajectories(data: DataProto) -> DataProto:
batch_size, num_steps = data.batch["action"].shape[:2]
new_batch_fields = {}
for key, tensor in data.batch.items():
if len(tensor.shape) >= 2 and tensor.shape[0] == batch_size and tensor.shape[1] == num_steps:
# (B, S, H, W) -> (B*S, H, W)
new_shape = (batch_size * num_steps, *tensor.shape[2:])
new_batch_fields[key] = tensor.reshape(new_shape)
elif len(tensor.shape) == 1 and tensor.shape[0] == batch_size:
# [e1, e2] -> [e1, e1, ..., e2, e2, ...] (S times each)
new_batch_fields[key] = tensor.repeat_interleave(num_steps)
else:
new_batch_fields[key] = tensor
new_data = DataProto.from_dict(tensors=new_batch_fields, meta_info=data.meta_info)
return new_data
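# Editorial example (hypothetical, not part of the original module): B=2 trajectories of S=3 steps are
# flattened into 6 independent samples; per-trajectory scalars are repeated once per step.
def _example_flatten_trajectories() -> None:
    data = DataProto.from_dict(tensors={"action": torch.zeros(2, 3, 7), "reward_tensor": torch.tensor([1.0, 0.0])})
    flat = flatten_trajectories(data)
    assert flat.batch["action"].shape == (6, 7)
    assert flat.batch["reward_tensor"].tolist() == [1.0, 1.0, 1.0, 0.0, 0.0, 0.0]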
# def filter_by_acc(data: DataProto, accuracy_lower_bound, accuracy_upper_bound) -> torch.Tensor:
class RobRayPPOTrainer(RayPPOTrainer):
"""Distributed PPO trainer using Ray for scalable reinforcement learning.
This trainer orchestrates distributed PPO training across multiple nodes and GPUs,
managing actor rollouts, critic training, and reward computation with Ray backend.
Supports various model architectures including FSDP, Megatron, vLLM, and SGLang integration.
"""
def _start_profiling(self, do_profile: bool) -> None:
"""Start profiling for all worker groups including env workers."""
super()._start_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.start_profile(role="env", profile_step=self.global_steps)
def _stop_profiling(self, do_profile: bool) -> None:
"""Stop profiling for all worker groups including env workers."""
super()._stop_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.stop_profile()
def init_workers(self):
self.resource_pool_manager.create_resource_pool()
if self.config.env.disagg_sim.enable:
# pin EnvWorker to Simulator GPU nodes
self.resource_pool_manager.get_resource_pool(Role.Env).accelerator_type = "sim"
self.resource_pool_manager.get_resource_pool(Role.ActorRollout).accelerator_type = "train_rollout"
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
actor_rollout_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.ActorRollout],
config=self.config.actor_rollout_ref,
role="actor_rollout",
)
self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
assert Role.Env in self.role_worker_mapping
if Role.Env in self.role_worker_mapping:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Env)
env_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.Env], config=self.config.env)
self.resource_pool_to_cls[resource_pool]["env"] = env_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`.
# Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg = {}
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
# Only require nsight worker options when tool is nsys
if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
assert (
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
is not None
), "worker_nsight_options must be set when using nsys with profile_steps"
wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
)
wg_kwargs["device_name"] = self.device_name
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(
resource_pool=resource_pool,
ray_cls_with_init=worker_dict_cls,
**wg_kwargs,
)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = all_wg["actor_rollout"]
self.actor_rollout_wg.init_model()
self.env_wg = all_wg["env"]
# create async rollout manager and request scheduler
self.async_rollout_mode = False
if self.config.actor_rollout_ref.rollout.mode == "async_envloop":
from verl.experimental.vla.env_loop import EnvLoop
self.async_rollout_mode = True
self.async_rollout_manager = EnvLoop(
config=self.config, rollout_wg=self.actor_rollout_wg, env_wg=self.env_wg
)
def _get_gen_batch(self, batch: DataProto) -> DataProto:
# pop those keys for generation
batch_keys_to_pop = []
non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys())
gen_batch = batch.pop(
batch_keys=batch_keys_to_pop,
non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop),
)
return gen_batch
def _reset_envs(self, gen_batch: DataProto) -> asyncio.Future:
initial_state_ids = gen_batch.non_tensor_batch["state_ids"]
task_ids = gen_batch.non_tensor_batch["task_ids"]
reset_prompts = DataProto.from_dict(non_tensors={"state_ids": initial_state_ids, "task_ids": task_ids})
reset_future = self.env_wg.reset_envs_to_state_ids(reset_prompts)
return reset_future
def fit(self):
"""
The training loop of PPO.
The driver process only need to call the compute functions of the worker group through RPC
to construct the PPO dataflow.
The light-weight advantage computation is done on the driver process.
"""
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint before doing anything
self._load_checkpoint()
# perform validation before training
# currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
# add tqdm
progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
last_val_metrics = None
self.max_steps_duration = 0
prev_step_profile = False
curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
next_step_profile = False
for epoch in range(self.config.trainer.total_epochs):
train_iter = iter(self.train_dataloader)
next_batch_dict = next(train_iter)
need_validate = False
dataloader_len = len(self.train_dataloader)
print(f"Starting epoch {epoch}, dataloader length: {dataloader_len}")
for step_idx in range(dataloader_len):
batch_dict = next_batch_dict
try:
next_batch_dict = next(train_iter)
except StopIteration:
next_batch_dict = None
metrics = {}
timing_raw = {}
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not prev_step_profile and curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
batch: DataProto = DataProto.from_single_dict(batch_dict)
# add uid to batch
batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch))], dtype=object)
gen_batch = self._get_gen_batch(batch)
# pass global_steps to trace
gen_batch.meta_info["global_steps"] = self.global_steps
# pass generation config to gen_batch
gen_batch.meta_info["do_sample"] = True
gen_batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
gen_batch.meta_info["prompt_length"] = self.config.actor_rollout_ref.rollout.prompt_length
gen_batch.meta_info["eos_token_id"] = self.tokenizer.eos_token_id
gen_batch.meta_info["n_samples"] = self.config.actor_rollout_ref.rollout.n
gen_batch.meta_info["pad_token_id"] = self.tokenizer.pad_token_id
gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
is_last_step = self.global_steps >= self.total_training_steps
if step_idx == 0 or need_validate:
                    # reset env workers on the first step;
                    # if validation ran on the last step, the reset was not executed and needs to be done here
reset_future = self._reset_envs(gen_batch)
need_validate = (
self.val_reward_fn is not None
and self.config.trainer.test_freq > 0
and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
)
with marked_timer("step", timing_raw):
# generate a batch
with marked_timer("gen", timing_raw, color="red"):
gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch, reset_future)
# prepare for next batch's env reset
if step_idx != dataloader_len - 1 and not need_validate:
next_batch: DataProto = DataProto.from_single_dict(next_batch_dict)
next_gen_batch = self._get_gen_batch(next_batch)
next_gen_batch = next_gen_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
reset_future = self._reset_envs(next_gen_batch)
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = gen_batch_output
if "response_mask" not in batch.batch.keys():
batch.batch["response_mask"] = compute_response_mask(self.config, batch)
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn)
batch.batch["reward_tensor"] = reward_tensor
batch = flatten_trajectories(batch)
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
# recompute old_log_probs
with marked_timer("old_log_prob", timing_raw, color="blue"):
old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
entropys = old_log_prob.batch["entropys"]
response_masks = batch.batch["response_mask"]
actor_config = self.config.actor_rollout_ref.actor
entropy_agg = agg_loss(
loss_mat=entropys,
loss_mask=response_masks,
loss_agg_mode=actor_config.loss_agg_mode,
loss_scale_factor=actor_config.loss_scale_factor,
)
old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()}
metrics.update(old_log_prob_metrics)
old_log_prob.batch.pop("entropys")
batch = batch.union(old_log_prob)
if "rollout_log_probs" in batch.batch.keys():
# TODO: we may want to add diff of probs too.
from verl.utils.debug.metrics import calculate_debug_metrics
metrics.update(calculate_debug_metrics(batch))
if self.use_reference_policy:
# compute reference log_prob
with marked_timer("ref", timing_raw, color="olive"):
if not self.ref_in_actor:
ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
else:
ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch)
batch = batch.union(ref_log_prob)
# compute values
if self.use_critic:
with marked_timer("values", timing_raw, color="cyan"):
values = self.critic_wg.compute_values(batch)
batch = batch.union(values)
with marked_timer("adv", timing_raw, color="brown"):
# we combine with rule-based rm
reward_extra_infos_dict: dict[str, list] = None
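                        # Scatter the scalar trajectory reward onto the last valid response position of each
                        # row; rows without any valid response contribute an all-zero score row.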
token_level_scores = torch.zeros_like(response_masks, dtype=torch.float32)
flipped_mask = response_masks.flip(dims=[1])
indices_in_flipped = torch.argmax(flipped_mask.long(), dim=1)
last_true_indices = response_masks.shape[-1] - 1 - indices_in_flipped
rows_with_response = response_masks.any(dim=1)
effective_rewards = batch.batch["reward_tensor"] * rows_with_response.to(
batch.batch["reward_tensor"].dtype
)
row_indices = torch.arange(response_masks.shape[0], device=token_level_scores.device)
token_level_scores[row_indices, last_true_indices] = effective_rewards
batch.batch["token_level_scores"] = token_level_scores
if reward_extra_infos_dict:
batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()})
# compute rewards. apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward:
batch, kl_metrics = apply_kl_penalty(
batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
)
metrics.update(kl_metrics)
else:
batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]
# compute advantages, executed on the driver process
norm_adv_by_std_in_grpo = self.config.algorithm.get(
"norm_adv_by_std_in_grpo", True
) # GRPO adv normalization factor
batch = compute_advantage(
batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n,
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
config=self.config.algorithm,
)
# update critic
if self.use_critic:
with marked_timer("update_critic", timing_raw, color="pink"):
critic_output = self.critic_wg.update_critic(batch)
critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
metrics.update(critic_output_metrics)
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with marked_timer("update_actor", timing_raw, color="red"):
batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable
actor_output = self.actor_rollout_wg.update_actor(batch)
actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
metrics.update(actor_output_metrics)
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
with marked_timer("dump_rollout_generations", timing_raw, color="green"):
inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()
sample_gts = [
item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None)
for item in batch
]
if "request_id" in batch.non_tensor_batch:
reward_extra_infos_dict.setdefault(
"request_id",
batch.non_tensor_batch["request_id"].tolist(),
)
self._dump_generations(
inputs=inputs,
outputs=outputs,
gts=sample_gts,
scores=scores,
reward_extra_infos_dict=reward_extra_infos_dict,
dump_path=rollout_data_dir,
)
# validate
if need_validate:
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if is_last_step:
last_val_metrics = val_metrics
metrics.update(val_metrics)
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
self._save_checkpoint()
with marked_timer("stop_profile", timing_raw):
next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
curr_step_profile and not next_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
prev_step_profile = curr_step_profile
curr_step_profile = next_step_profile
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# training metrics
metrics.update(
{
"training/global_step": self.global_steps,
"training/epoch": epoch,
}
)
# collect metrics
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: implement actual tflpo and theoretical tflpo
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
self.train_dataloader.sampler.update(batch=batch)
# TODO: make a canonical logger that supports various backend
logger.log(data=metrics, step=self.global_steps)
progress_bar.update(1)
self.global_steps += 1
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
if is_last_step:
pprint(f"Final validation metrics: {last_val_metrics}")
progress_bar.close()
return
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
self.train_dataset.on_batch_end(batch=batch)
def _validate(self):
data_source_lst = []
reward_extra_infos_dict: dict[str, list] = defaultdict(list)
# Lists to collect samples for the table
sample_scores = []
sample_turns = []
sample_uids = []
for test_data in self.val_dataloader:
test_batch = DataProto.from_single_dict(test_data)
if len(test_batch) < self.config.data.val_batch_size:
print(f"drop last batch in val_dataloader, len {len(test_batch)}")
break
if "uid" not in test_batch.non_tensor_batch:
test_batch.non_tensor_batch["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(test_batch))], dtype=object
)
test_gen_batch = self._get_gen_batch(test_batch)
test_gen_batch.meta_info = {
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
"prompt_length": self.config.actor_rollout_ref.rollout.prompt_length,
"recompute_log_prob": False,
"do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
"temperature": self.config.actor_rollout_ref.rollout.temperature,
"n_samples": self.config.actor_rollout_ref.rollout.n,
"validate": True,
"global_steps": self.global_steps,
}
test_gen_batch = test_gen_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
sample_uids.extend(test_gen_batch.non_tensor_batch["uid"])
# pad to be divisible by dp_size
size_divisor = self.config.env.train.num_envs * self.config.env.rollout.pipeline_stage_num
test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor)
reset_future = self._reset_envs(test_gen_batch_padded)
test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(
test_gen_batch_padded, reset_future
)
# unpad
test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
print("validation generation end")
test_batch = test_output_gen_batch
test_batch.meta_info["validate"] = True
# evaluate using reward_function
if self.val_reward_fn is None:
raise ValueError("val_reward_fn must be provided for validation.")
result = self.val_reward_fn(test_batch, return_dict=True)
reward_tensor = result["reward_tensor"]
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_extra_infos_dict["reward"].extend(scores)
print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}")
if "reward_extra_info" in result:
for key, lst in result["reward_extra_info"].items():
reward_extra_infos_dict[key].extend(lst)
print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}")
# collect num_turns of each prompt
if "__num_turns__" in test_batch.non_tensor_batch:
sample_turns.append(test_batch.non_tensor_batch["__num_turns__"])
data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0]))
for key_info, lst in reward_extra_infos_dict.items():
assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"
data_sources = np.concatenate(data_source_lst, axis=0)
data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict)
metric_dict = {}
for data_source, var2metric2val in data_src2var2metric2val.items():
core_var = "acc" if "acc" in var2metric2val else "reward"
for var_name, metric2val in var2metric2val.items():
n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
for metric_name, metric_val in metric2val.items():
if (
(var_name == core_var)
and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
and (f"@{n_max}" in metric_name)
):
metric_sec = "val-core"
else:
metric_sec = "val-aux"
pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
metric_dict[pfx] = metric_val
if len(sample_turns) > 0:
sample_turns = np.concatenate(sample_turns)
metric_dict["val-aux/num_turns/min"] = sample_turns.min()
metric_dict["val-aux/num_turns/max"] = sample_turns.max()
metric_dict["val-aux/num_turns/mean"] = sample_turns.mean()
return metric_dict
|
verl__experimental__vla__rob_ray_trainer.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Literal
import torch
from verl import DataProto
class SupportSACTraining:
"""
Base class for Soft Actor-Critic (SAC).
Subclasses implement a Policy that can be plugged directly into SAC training.
This implementation requires the actor and critic to be integrated within a
single model instance, e.g., sharing a backbone with an additional MLP head
that outputs critic values (Q/V) alongside the actor's action distribution.
Note:
This class intentionally does NOT inherit from `abc.ABC`.
The root model may be wrapped or transformed by FSDP (Fully Sharded
Data Parallel), which performs runtime class substitution; using
`ABCMeta` can break FSDP's class rewriting mechanism.
"""
def sac_init(self):
raise NotImplementedError("Subclasses must implement sac_init method.")
def sac_forward_critic(
self,
a: dict[str, torch.Tensor],
state_features: Any,
*,
use_target_network: bool = False,
method: Literal["cat", "min"] = "cat",
requires_grad: bool = False,
) -> torch.Tensor:
"""Compute Q-values for given state-action pairs.
Args:
a: Dictionary of tensors representing actions, with key:
- "full_action": torch.Tensor of shape (B, action_steps, action_dim)
state_features: Any data structure representing the processed state features.
use_target_network: Whether to use the target critic network heads.
method: Method to combine multiple heads' outputs ("cat" or "min").
requires_grad: Whether to enable gradients for the critic head parameters.
Returns:
q_values: torch.Tensor of shape (B, num_heads) if method is "cat",
or (B, 1) if method is "min", representing the computed Q-values
"""
raise NotImplementedError("Subclasses must implement sac_forward_critic method.")
def sac_forward_actor(
self,
state_features: Any,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Compute actions and their log probabilities from state features.
Args:
state_features: Any data structure representing the processed state features.
Returns:
actions: torch.Tensor of shape (B, n_action_steps, action_dim), sampled actions.
log_probs: torch.Tensor of shape (B,), log probabilities of the sampled actions.
"""
raise NotImplementedError("Subclasses must implement sac_forward_actor method.")
def sac_forward_state_features(self, s: dict[str, torch.Tensor]) -> Any:
"""Compute state features needed for SAC actor and critic.
Args:
s: Dictionary of tensors representing the states, with keys
- "images": torch.Tensor of shape (B, n_images, C, H, W)
- "image_masks": torch.Tensor of shape (B, n_images)
- "lang_tokens": torch.Tensor of shape (B, L)
- "lang_masks": torch.Tensor of shape (B, L)
- "states": torch.Tensor of shape (B, state_dim)
Returns:
state_features: Any data structure representing the processed state features.
"""
raise NotImplementedError("Subclasses must implement sac_forward_state_features method.")
def sac_update_target_network(self, tau: float):
"""Update the target network heads using Polyak averaging.
Args:
tau: The interpolation parameter for Polyak averaging.
"""
raise NotImplementedError("Subclasses must implement sac_update_target_network method.")
class BaseSACActor(ABC):
@abstractmethod
def update_policy(self, data: DataProto) -> dict:
"""
Update the policy using the provided data batch.
Args:
data: DataProto containing the following entries in `data.batch`:
- "a0.full_action": Tensor of shape (B, action_steps, action_dim),
representing the current action chunk for each sample.
- "a1.full_action": Tensor of shape (B, action_steps, action_dim),
representing the next action chunk for each sample.
- "s0.states": Tensor of shape (B, state_dim),
representing the current environment or agent state.
- "s1.states": Tensor of shape (B, state_dim),
representing the next environment or agent state.
- "s0.images": Tensor of shape (B, n_images, C, H, W),
containing current visual observations.
- "s1.images": Tensor of shape (B, n_images, C, H, W),
containing next-step visual observations.
- "s0.image_masks": Tensor of shape (B, n_images),
indicating valid images per sample.
- "s1.image_masks": Tensor of shape (B, n_images),
indicating valid images per sample.
- "s0.lang_tokens": Tensor of shape (B, max_seq_len),
tokenized language instructions.
- "s1.lang_tokens": Tensor of shape (B, max_seq_len),
tokenized language instructions for the next step.
- "s0.lang_masks": Tensor of shape (B, max_seq_len),
attention masks for language tokens.
- "s1.lang_masks": Tensor of shape (B, max_seq_len),
attention masks for language tokens for the next step.
- "rewards": Tensor of shape (B,),
chunk-level scalar rewards aligned to the next step.
- "response_mask": Tensor of shape (B, action_steps),
mask indicating whether each sample has a valid response.
"""
raise NotImplementedError("Subclasses must implement update_policy method.")
|
verl__experimental__vla__sac__base.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In single GPU rollout, the sequences are generated directly by sampling from the model.
The output will contain
1. output_ids
2. attention_masks (left padding)
3. eos_masks
4. log_probs
"""
import logging
from typing import Any
import torch
from verl import DataProto
from verl.experimental.vla.naive_rollout_rob import NaiveRolloutRob
from verl.utils.device import get_device_id, get_device_name
logger = logging.getLogger(__name__)
__all__ = ["PI0RolloutRob"]
class PI0RolloutRob(NaiveRolloutRob):
def __init__(
self,
model_config: dict,
module: torch.nn.Module,
tokenizer: Any,
):
self.model_config = model_config
self.module = module
self.tokenizer = tokenizer
from torch.distributed.fsdp import register_fsdp_forward_method
register_fsdp_forward_method(self.module, "sample_actions")
@torch.no_grad()
def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Generate sequences"""
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
prompts.to(get_device_id())
output, s, a = self.module.sample_actions(prompts, tokenizer=self.tokenizer)
ret = DataProto.from_dict(
{
"action": output.action,
"full_action": a["full_action"],
"images": s["images"],
"image_masks": s["image_masks"],
"lang_tokens": s["lang_tokens"],
"lang_masks": s["lang_masks"],
"states": s["states"],
}
)
return ret
|
verl__experimental__vla__sac__naive_rollout_pi05.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from tensordict import TensorDict
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class SACReplayPool:
"""SAC Replay Pool for storing samples."""
def __init__(
self,
capacity: int,
pool_device: str = "cpu",
sample_device: str = "cpu",
):
self.pool = None
self.capacity = capacity
self.size = 0
self.position = 0
self.rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
self.pool_device = pool_device
self.sample_device = sample_device
def add_batch(self, batch: TensorDict):
"""Add a batch of samples to the replay pool.
Args:
batch (TensorDict): A batch of samples to add. The batch should be a TensorDict
containing the necessary keys for SAC training, each with shape [batch_size, ...].
"""
if self.pool is None:
self._lazy_init_pool(batch)
self._insert_block_to_pool(batch)
def sample_batch(self, batch_size: int) -> TensorDict:
"""Sample a batch of experiences from the replay pool.
Args:
batch_size (int): The number of samples to draw.
Returns:
TensorDict: A batch of sampled experiences.
"""
assert self.size >= batch_size, "Not enough samples in the replay pool to sample the requested batch size."
idx = torch.randperm(self.size)[:batch_size]
sampled_batch = TensorDict(
{key: value.index_select(0, idx).to(self.sample_device) for key, value in self.pool.items()},
batch_size=[batch_size],
device=self.sample_device,
)
return sampled_batch
def insert_and_resample(
self,
source: TensorDict,
) -> TensorDict:
"""Insert a block of data from source to the replay pool and sample a batch with the same size."""
self.add_batch(source)
return self.sample_batch(source.size(0))
def save(self, directory: str):
"""Save the replay pool to a directory."""
os.makedirs(directory, exist_ok=True)
filepath = f"{directory}/sac_replay_pool_rank_{self.rank}.pt"
if self.pool is not None:
meta_info = {
"size": self.size,
"capacity": self.capacity,
"position": self.position,
"pool_device": self.pool_device,
"sample_device": self.sample_device,
}
torch.save((self.pool.cpu(), meta_info), filepath)
logger.info(f"[Rank {self.rank}] Replay pool saved to {filepath} with size: {self.size}")
else:
logger.info("Replay pool is empty. Nothing to save.")
def load(self, directory: str):
"""Load the replay pool from a directory."""
filepath = f"{directory}/sac_replay_pool_rank_{self.rank}.pt"
if not os.path.exists(filepath):
return False
try:
pool, meta_info = torch.load(filepath, weights_only=False)
except (RuntimeError, EOFError, ValueError) as exc:
logger.warning(
f"[Rank {self.rank}] Failed to load replay pool from {filepath}: {exc}. "
"Starting with an empty replay pool."
)
return False
self.pool = pool.to(self.pool_device)
if meta_info["capacity"] != self.capacity:
if meta_info["capacity"] > self.capacity:
logger.warning(
f"Loaded replay pool capacity {meta_info['capacity']} is greater than "
f"the current capacity {self.capacity}. Truncating the loaded pool."
)
self.pool = TensorDict(
{key: value[: self.capacity] for key, value in pool.items()},
batch_size=[self.capacity],
device=self.pool_device,
)
self.size = min(meta_info["size"], self.capacity)
self.position = meta_info["position"] % self.capacity
else:
logger.warning(
f"Loaded replay pool capacity {meta_info['capacity']} is less than "
f"the current capacity {self.capacity}. Keeping the current capacity."
)
self.pool = TensorDict(
{
key: torch.cat(
[
value,
torch.zeros(
(self.capacity - meta_info["capacity"], *value.shape[1:]),
dtype=value.dtype,
device=self.pool_device,
),
],
dim=0,
)
for key, value in pool.items()
},
batch_size=[self.capacity],
device=self.pool_device,
)
self.size = min(meta_info["size"], self.capacity)
self.position = meta_info["position"] % self.capacity
logger.info(f"[Rank {self.rank}] Replay pool loaded from {filepath} with size: {self.size}")
return True
@classmethod
def from_path(
cls,
directory: str,
) -> "SACReplayPool":
"""Load a replay pool from a file.
Args:
directory (str): The directory containing the saved replay pool.
Returns:
SACReplayPool: An instance of SACReplayPool with the loaded data.
"""
rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
filepath = f"{directory}/sac_replay_pool_rank_{rank}.pt"
pool, meta_info = torch.load(filepath, weights_only=False)
replay_pool = cls(
capacity=meta_info["capacity"],
pool_device=meta_info["pool_device"],
sample_device=meta_info["sample_device"],
)
replay_pool.pool = pool.to(replay_pool.pool_device)
replay_pool.rank = rank
replay_pool.size = meta_info["size"]
replay_pool.position = meta_info["position"]
logger.info(f"[Rank {rank}] Replay pool loaded from {filepath} with size: {replay_pool.size}")
return replay_pool
def _insert_block_to_pool(
self,
source: TensorDict,
):
"""insert a block of data from source to the replay pool."""
length = min(source.size(0), self.capacity)
idx = (self.position + torch.arange(length)) % self.capacity
for key in source.keys():
self.pool[key].index_copy_(0, idx, source[key][:length].to(self.pool_device))
self.position = (self.position + length) % self.capacity
self.size = min(self.size + length, self.capacity)
def _lazy_init_pool(self, sample: TensorDict):
"""Lazily initialize the replay pool based on the sample structure."""
logger.info(f"Initializing replay pool with capacity: {self.capacity}")
self.pool = TensorDict(
{
key: torch.zeros((self.capacity, *value.shape[1:]), dtype=value.dtype, device=self.pool_device)
for key, value in sample.items()
},
batch_size=[self.capacity],
device=self.pool_device,
)
def __repr__(self):
return (
f"SACReplayPool(capacity={self.capacity}, "
f"size={self.size}, pool_device={self.pool_device}, sample_device={self.sample_device})"
)
def __len__(self):
return self.size
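# Illustrative usage sketch (not part of the original module; the tensor names and shapes
# below are hypothetical):
#
#   pool = SACReplayPool(capacity=8, pool_device="cpu", sample_device="cpu")
#   batch = TensorDict({"obs": torch.zeros(4, 3), "act": torch.zeros(4, 2)}, batch_size=[4])
#   pool.add_batch(batch)            # the first insert lazily allocates storage of size `capacity`
#   sampled = pool.sample_batch(2)   # uniform sampling without replacement from the filled slots
#   mixed = pool.insert_and_resample(batch)  # insert, then resample a batch of the same size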
|
verl__experimental__vla__sac__replay_pool.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Single Process Actor
"""
import logging
import os
import numpy as np
import torch
import torch.nn.functional as F
from tensordict import TensorDict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from typing_extensions import override
from verl.experimental.vla.sac.replay_pool import SACReplayPool
from verl.protocol import DataProto
from verl.utils.device import get_device_id, get_device_name
from .base import BaseSACActor, SupportSACTraining
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_dict_from_prefix(tensordict: TensorDict, prefix: str) -> dict:
"""Extract a sub-dictionary from a TensorDict based on a given prefix.
Args:
tensordict: The input TensorDict containing various keys.
prefix: The prefix string to filter keys.
Returns:
A dictionary containing key-value pairs from the TensorDict
where the keys start with the specified prefix. The prefix is removed
from the keys in the resulting dictionary.
"""
result = {}
prefix_length = len(prefix)
for key in tensordict.keys():
if key.startswith(prefix):
new_key = key[prefix_length:]
result[new_key] = tensordict[key]
return result
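# Illustrative example (hypothetical keys): given a TensorDict with keys "s0.states",
# "s0.images", and "rewards", calling get_dict_from_prefix(td, "s0.") returns
# {"states": td["s0.states"], "images": td["s0.images"]} and leaves "rewards" out.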
def merge_nested_dicts_or_tuples(a: dict | tuple, b: dict | tuple) -> dict | tuple:
"""Merge two nested structures (dictionaries or tuples) by concatenating tensors
along the first dimension.
"""
if isinstance(a, dict) and isinstance(b, dict):
merged = {}
for key in a.keys():
merged[key] = merge_nested_dicts_or_tuples(a[key], b[key])
return merged
elif isinstance(a, tuple) and isinstance(b, tuple):
merged = []
for item_a, item_b in zip(a, b, strict=False):
merged.append(merge_nested_dicts_or_tuples(item_a, item_b))
return tuple(merged)
else:
return torch.cat([a, b], dim=0)
def split_nested_dicts_or_tuples(data: dict | tuple, split_num: int) -> list[dict | tuple]:
"""Split a nested structure (dictionary or tuple) into smaller chunks along the first dimension."""
if isinstance(data, torch.Tensor):
split_tensors = torch.chunk(data, split_num, dim=0)
return list(split_tensors)
elif isinstance(data, dict):
split_dicts = [dict() for _ in range(split_num)]
for key, value in data.items():
split_values = split_nested_dicts_or_tuples(value, split_num)
for i in range(split_num):
split_dicts[i][key] = split_values[i]
return split_dicts
elif isinstance(data, tuple):
split_tuples = [list() for _ in range(split_num)]
for item in data:
split_items = split_nested_dicts_or_tuples(item, split_num)
for i in range(split_num):
split_tuples[i].append(split_items[i])
return [tuple(split_tuple) for split_tuple in split_tuples]
else:
raise TypeError("Input data must be a torch.Tensor, dict, or tuple.")
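# Illustrative round trip (hypothetical shapes): merging {"x": (2, 4)} with {"x": (2, 4)}
# concatenates along dim 0 into {"x": (4, 4)}; splitting that result with split_num=2
# chunks it back into two dicts of shape (2, 4) each. Nested tuples are handled the same way.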
class RobDataParallelSACActor(BaseSACActor):
def __init__(
self,
config,
actor_module: SupportSACTraining,
actor_optimizer: torch.optim.Optimizer,
tokenizer=None,
):
super().__init__()
self.config = config
self.sac_config = config.sac
self.device = get_device_name()
self.actor_optimizer = actor_optimizer
self.actor_module = actor_module
self.actor_module.sac_init()
self.tokenizer = tokenizer
self.replay_pool = SACReplayPool(capacity=self.config.replay_pool_capacity, sample_device=self.device)
self.replay_pool.load(self.config.replay_pool_save_dir)
self._init_alpha()
def _init_alpha(self):
"""Initialize the alpha optimizer for automatic entropy tuning."""
self.auto_entropy = self.sac_config.get("auto_entropy", False)
if self.auto_entropy:
self.target_entropy = torch.tensor(float(self.sac_config.get("target_entropy", -32.0)), device=self.device)
# Initialize raw_alpha parameter
self.alpha_type = self.sac_config.get("alpha_type", "softplus")
if self.alpha_type == "exp":
self.raw_alpha = torch.nn.Parameter(
np.log(np.exp(self.sac_config.get("initial_alpha", 1))) * torch.ones(1, device=self.device),
requires_grad=True,
)
elif self.alpha_type == "softplus":
self.raw_alpha = torch.nn.Parameter(
np.log(np.exp(self.sac_config.get("initial_alpha", 0.01)) - 1) * torch.ones(1, device=self.device),
requires_grad=True,
)
else:
raise NotImplementedError(f"Unsupported alpha_type: {self.alpha_type}")
# build alpha optimizer and scheduler
self.alpha_optimizer = torch.optim.Adam([self.raw_alpha], lr=self.sac_config.get("alpha_lr", 3e-4))
self.alpha_scheduler = torch.optim.lr_scheduler.ConstantLR(self.alpha_optimizer, factor=1.0)
def _get_alpha(self) -> torch.Tensor:
if self.auto_entropy:
if self.alpha_type == "exp":
return self.raw_alpha.exp()
elif self.alpha_type == "softplus":
return torch.nn.functional.softplus(self.raw_alpha)
else:
raise NotImplementedError(f"Unsupported alpha_type: {self.alpha_type}")
else:
return torch.tensor(float(self.sac_config.get("initial_alpha", 0.2)), device=self.device)
def _calculate_actor_loss(
self,
log_probs: torch.Tensor,
q_values: torch.Tensor,
valid: torch.Tensor,
) -> torch.Tensor:
"""Calculate actor loss using the SAC loss function.
Args:
log_probs: Tensor of shape (B,) representing the log probabilities of actions.
q_values: Tensor of shape (B,) representing the Q-values for the actions.
valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).
Returns:
Tensor of shape (1,) representing the actor loss.
"""
alpha = self._get_alpha()
loss = alpha * log_probs - q_values
actor_loss = (loss * valid).sum() / (valid.sum().clamp_min(1.0))
return actor_loss
def _calculate_alpha_loss(self, log_probs: torch.Tensor, valid: torch.Tensor) -> torch.Tensor:
"""Calculate alpha loss for automatic entropy tuning.
Args:
log_probs: Tensor of shape (B,) representing the log probabilities of actions.
valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).
Returns:
Tensor of shape (1,) representing the alpha loss.
"""
alpha_loss = -self._get_alpha() * (log_probs.detach() + self.target_entropy)
alpha_loss = (alpha_loss * valid).sum() / (valid.sum().clamp_min(1.0))
return alpha_loss
def _calculate_critic_loss(
self,
q_predict: torch.Tensor,
q_target: torch.Tensor,
rewards: torch.Tensor,
valid: torch.Tensor,
next_log_prob: torch.Tensor,
) -> torch.Tensor:
"""Calculate critic loss using the SAC loss function.
Args:
q_predict: Tensor of shape (B, critic_num) representing predicted Q-values.
q_target: Tensor of shape (B,) representing target Q-values.
rewards: Tensor of shape (B,) representing rewards.
valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).
next_log_prob: Tensor of shape (B,) representing log probabilities of next actions.
Returns:
Tensor of shape (1,) representing the critic loss.
"""
gamma = self.sac_config.gamma
alpha = self._get_alpha()
with torch.no_grad():
y = rewards + valid * gamma * (q_target - alpha * next_log_prob)
y = y.unsqueeze(1).expand_as(q_predict) # (B, critic_num)
valid_mask = valid.unsqueeze(1)
mse = F.mse_loss(q_predict, y, reduction="none")
per_critic = (mse * valid_mask).sum(dim=0) / valid_mask.sum().clamp_min(1.0)
critic_loss = per_critic.sum()
return critic_loss
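# In equation form, the masked Bellman target computed above is
#   y_i = r_i + valid_i * gamma * (Q_target(s1_i, a1_i) - alpha * log pi(a1_i | s1_i)),
# where Q_target comes from the target critics (method="min" in _forward_critic). The loss is
# the valid-masked MSE between each critic head's prediction and y, summed over heads.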
def _forward_critic(self, micro_batch: TensorDict) -> torch.Tensor:
s0 = get_dict_from_prefix(micro_batch, "s0.")
s1 = get_dict_from_prefix(micro_batch, "s1.")
a0 = get_dict_from_prefix(micro_batch, "a0.")
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
with torch.no_grad():
s = merge_nested_dicts_or_tuples(s0, s1)
state_features = self.actor_module.sac_forward_state_features(s)
s0_state_features, s1_state_features = split_nested_dicts_or_tuples(state_features, 2)
a1_actions, log_probs_1 = self.actor_module.sac_forward_actor(s1_state_features)
q_values_0 = self.actor_module.sac_forward_critic(
a0,
s0_state_features,
use_target_network=False,
method="cat",
requires_grad=True,
)
q_values_1 = self.actor_module.sac_forward_critic(
{"full_action": a1_actions},
s1_state_features,
use_target_network=True,
method="min",
requires_grad=False,
)
critic_loss = self._calculate_critic_loss(
q_predict=q_values_0,
q_target=q_values_1,
rewards=micro_batch["rewards"].max(dim=-1).values,
valid=micro_batch["valid"],
next_log_prob=log_probs_1,
)
return critic_loss
def _forward_actor(self, micro_batch: TensorDict) -> tuple[torch.Tensor, torch.Tensor]:
micro_batch = micro_batch.to(get_device_id())
s0 = get_dict_from_prefix(micro_batch, "s0.")
with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
s0_state_features = self.actor_module.sac_forward_state_features(s0)
a0_actions, log_probs_0 = self.actor_module.sac_forward_actor(s0_state_features)
q_values_0 = self.actor_module.sac_forward_critic(
{"full_action": a0_actions},
s0_state_features,
use_target_network=False,
method="min",
requires_grad=False,
)
actor_loss = self._calculate_actor_loss(
log_probs=log_probs_0,
q_values=q_values_0,
valid=micro_batch["valid"],
)
return actor_loss, log_probs_0
@override
def update_policy(self, data: DataProto):
batch: TensorDict = data.select(
[
"a0.full_action",
"a1.full_action",
"s0.states",
"s1.states",
"s0.images",
"s1.images",
"s0.image_masks",
"s1.image_masks",
"s0.lang_tokens",
"s1.lang_tokens",
"s0.lang_masks",
"s1.lang_masks",
"rewards",
"response_mask",
]
).batch
batch = self.replay_pool.insert_and_resample(batch)
batch["valid"] = batch["response_mask"].any(dim=-1).float() # (B,)
micro_batches = batch.split(self.config.ppo_micro_batch_size_per_gpu)
global_steps = data.meta_info["global_steps"]
grad_accum_steps = len(micro_batches) * torch.distributed.get_world_size()
actor_logprobs_list = []
actor_loss_list, critic_loss_list, alpha_loss_list = [], [], []
# Training critic
self.actor_optimizer.zero_grad()
for batch_idx, micro_batch in enumerate(micro_batches):
logger.info(f"[{batch_idx + 1}/{len(micro_batches)}] critic micro batch ")
micro_batch = micro_batch.to(get_device_id())
raw_critic_loss = self._forward_critic(micro_batch)
(raw_critic_loss / grad_accum_steps).backward()
critic_loss_list.append(raw_critic_loss.detach().item())
critic_grad_norm = self._optimizer_step()
if global_steps >= self.config.critic_warmup_steps:
# Training actor
self.actor_optimizer.zero_grad()
for batch_idx, micro_batch in enumerate(micro_batches):
logger.info(f"[{batch_idx + 1}/{len(micro_batches)}] actor micro batch ")
micro_batch = micro_batch.to(get_device_id())
raw_actor_loss, log_probs = self._forward_actor(micro_batch)
(raw_actor_loss / grad_accum_steps).backward()
actor_loss_list.append(raw_actor_loss.detach().item())
actor_logprobs_list.append(log_probs.detach())
actor_grad_norm = self._optimizer_step()
# Training alpha
# NOTE: We reuse the log-probabilities computed during the actor forward pass
# to update the entropy temperature (alpha), instead of re-forwarding
# the actor after the policy update (saving compute).
if self.auto_entropy:
self.alpha_optimizer.zero_grad()
for micro_batch, log_probs in zip(micro_batches, actor_logprobs_list, strict=False):
micro_batch = micro_batch.to(get_device_id())
raw_alpha_loss = self._calculate_alpha_loss(log_probs, micro_batch["valid"])
(raw_alpha_loss / grad_accum_steps).backward()
alpha_loss_list.append(raw_alpha_loss.detach().item())
torch.distributed.all_reduce(self.raw_alpha.grad, op=torch.distributed.ReduceOp.SUM)
alpha_grad_norm = torch.nn.utils.clip_grad_norm_(self.raw_alpha, max_norm=self.config.grad_clip)
self.alpha_optimizer.step()
self.alpha_scheduler.step()
# Update target networks
self.actor_module.sac_update_target_network(self.sac_config.tau)
# Save replay pool
if global_steps % self.config.replay_pool_save_interval == 0:
self.replay_pool.save(self.config.replay_pool_save_dir)
# Log metrics
metrics = {
"data/reward_mean": (batch["rewards"].max(dim=-1).values * batch["valid"]).sum().item()
/ batch["valid"].sum().clamp_min(1.0).item(),
"data/valid_ratio": batch["valid"].float().mean().item(),
"sac/alpha": self._get_alpha().detach().item(),
"sac/alpha_lr": self.alpha_optimizer.param_groups[0]["lr"] if self.auto_entropy else 0.0,
"sac/alpha_loss": sum(alpha_loss_list) / len(alpha_loss_list) if alpha_loss_list else 0.0,
"sac/alpha_grad_norm": alpha_grad_norm.detach().item()
if self.auto_entropy and global_steps >= self.config.critic_warmup_steps
else 0.0,
"sac/replay_pool_size": len(self.replay_pool),
"actor/loss": sum(actor_loss_list) / len(actor_loss_list) if actor_loss_list else 0.0,
"actor/lr": self.actor_optimizer.param_groups[0]["lr"],
"actor/grad_norm": actor_grad_norm.detach().item()
if global_steps >= self.config.critic_warmup_steps
else 0.0,
"actor/logprob_mean": torch.cat(actor_logprobs_list).mean().detach().item() if actor_logprobs_list else 0.0,
"critic/loss": sum(critic_loss_list) / len(critic_loss_list) if critic_loss_list else 0.0,
"critic/grad_norm": critic_grad_norm.detach().item(),
}
return metrics
def _optimizer_step(self) -> torch.Tensor:
assert self.config.grad_clip is not None
if isinstance(self.actor_module, FSDP):
grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
self.actor_optimizer.step()
return grad_norm
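# Illustrative sketch of how update_policy is driven (hypothetical names; the real batch is
# assembled by the SAC ray trainer from rollout transitions):
#
#   data = DataProto.from_dict(tensors={
#       "s0.states": ..., "s1.states": ...,
#       "s0.images": ..., "s1.images": ...,
#       "s0.image_masks": ..., "s1.image_masks": ...,
#       "s0.lang_tokens": ..., "s1.lang_tokens": ...,
#       "s0.lang_masks": ..., "s1.lang_masks": ...,
#       "a0.full_action": ..., "a1.full_action": ...,
#       "rewards": ..., "response_mask": ...,
#   })
#   data.meta_info["global_steps"] = step
#   metrics = actor.update_policy(data)
#   # order inside: critic update -> (after warmup) actor update -> alpha update -> target sync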
|
verl__experimental__vla__sac__sac_actor.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import uuid
from collections import defaultdict
from pprint import pprint
import numpy as np
import torch
from omegaconf import OmegaConf
from tqdm import tqdm
from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.ray import RayClassWithInitArgs
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.metric_utils import (
compute_throughout_metrics,
process_validation_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.trainer.ppo.reward import compute_reward
from verl.trainer.ppo.utils import Role
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
def compute_response_mask(config, data: DataProto) -> torch.Tensor:
"""Compute the attention mask for the response part of the sequence.
This function extracts the portion of the attention mask that corresponds to the model's response,
which is used for masking computations that should only apply to response tokens.
Args:
data (DataProto): The data containing batched model outputs and inputs.
Returns:
torch.Tensor: The attention mask for the response tokens.
"""
complete = data.batch["complete"] # shape: [batch_size, num_steps, chunk_size]
complete_traj = complete.view(complete.shape[0], -1)  # shape: [batch_size, num_steps * chunk_size]
batch_size, action_steps = complete_traj.shape
step_indices = torch.arange(action_steps, device=complete.device).unsqueeze(0).expand(batch_size, -1)
first_true_idx_approx = torch.argmax(complete_traj.long(), dim=1)
has_any_true = complete_traj.any(dim=1)
final_first_true_idx = torch.where(
has_any_true, first_true_idx_approx, torch.tensor(action_steps - 1, device=complete.device)
)
mask_traj = step_indices <= final_first_true_idx.unsqueeze(1)
mask = mask_traj.view(complete.shape) # shape: [batch_size, num_steps, chunk_size]
mask = mask.repeat_interleave(config.env.actor.model.action_dim, dim=-1)  # expand to action dim
return mask
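# Worked example (hypothetical shapes): with complete = [[[F, F], [T, F]]] of shape
# [B=1, num_steps=2, chunk_size=2], the flattened trajectory is [F, F, T, F], the first True
# sits at index 2, so mask_traj = [[T, T, T, F]]; after reshaping back to [1, 2, 2] the mask is
# repeated along the last dimension action_dim times.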
def flatten_trajectories(data: DataProto) -> DataProto:
batch_size, num_steps = data.batch["action"].shape[:2]
new_batch_fields = {}
for key, tensor in data.batch.items():
if len(tensor.shape) >= 2 and tensor.shape[0] == batch_size and tensor.shape[1] == num_steps:
# (B, S, H, W) -> (B*S, H, W)
new_shape = (batch_size * num_steps, *tensor.shape[2:])
new_batch_fields[key] = tensor.reshape(new_shape)
elif len(tensor.shape) == 1 and tensor.shape[0] == batch_size:
# [e1, e2] -> [e1, e1, ..., e2, e2, ...] (S times each)
new_batch_fields[key] = tensor.repeat_interleave(num_steps)
else:
new_batch_fields[key] = tensor
new_data = DataProto.from_dict(tensors=new_batch_fields, meta_info=data.meta_info)
return new_data
def add_transition_prefixes(data: DataProto) -> DataProto:
batch = data.batch
step_key = "action" if "action" in batch else "full_action"
if step_key not in batch:
return data
num_steps = batch[step_key].shape[1]
if num_steps <= 1:
return data
def drop_last(tensor: torch.Tensor) -> torch.Tensor:
return tensor[:, :-1, ...]
def shift_next(tensor: torch.Tensor) -> torch.Tensor:
return tensor[:, 1:, ...]
state_keys = ["states", "images", "image_masks", "lang_tokens", "lang_masks"]
action_keys = ["full_action", "action"]
for key in state_keys:
if key in batch:
batch[f"s0.{key}"] = drop_last(batch[key])
batch[f"s1.{key}"] = shift_next(batch[key])
for key in action_keys:
if key in batch:
batch[f"a0.{key}"] = drop_last(batch[key])
batch[f"a1.{key}"] = shift_next(batch[key])
batch_size = batch[step_key].shape[0]
for key, tensor in list(batch.items()):
if tensor.ndim >= 2 and tensor.shape[0] == batch_size and tensor.shape[1] == num_steps:
batch[key] = drop_last(tensor)
return data
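# Illustrative effect (hypothetical T): for a trajectory with num_steps = T, the batch gains
# "s0.*" / "a0.*" views covering steps 0..T-2 and "s1.*" / "a1.*" views covering steps 1..T-1,
# and the original per-step tensors are truncated to T-1 steps so that row t describes the
# transition (s_t, a_t, r_t, s_{t+1}, a_{t+1}).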
class RobRaySACTrainer(RayPPOTrainer):
def _start_profiling(self, do_profile: bool) -> None:
"""Start profiling for all worker groups including env workers."""
super()._start_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.start_profile(role="env", profile_step=self.global_steps)
def _stop_profiling(self, do_profile: bool) -> None:
"""Stop profiling for all worker groups including env workers."""
super()._stop_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.stop_profile()
def init_workers(self):
self.resource_pool_manager.create_resource_pool()
if self.config.env.disagg_sim.enable:
# pin EnvWorker to Simulator GPU nodes
self.resource_pool_manager.get_resource_pool(Role.Env).accelerator_type = "sim"
self.resource_pool_manager.get_resource_pool(Role.ActorRollout).accelerator_type = "train_rollout"
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
actor_rollout_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.ActorRollout],
config=self.config.actor_rollout_ref,
role="actor_rollout",
)
self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
assert Role.Env in self.role_worker_mapping
if Role.Env in self.role_worker_mapping:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Env)
env_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.Env], config=self.config.env)
self.resource_pool_to_cls[resource_pool]["env"] = env_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`.
# Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg = {}
wg_kwargs = {} # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
# Only require nsight worker options when tool is nsys
if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
assert (
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
is not None
), "worker_nsight_options must be set when using nsys with profile_steps"
wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
)
wg_kwargs["device_name"] = self.device_name
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(
resource_pool=resource_pool,
ray_cls_with_init=worker_dict_cls,
**wg_kwargs,
)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = all_wg["actor_rollout"]
self.actor_rollout_wg.init_model()
self.env_wg = all_wg["env"]
# create async rollout manager and request scheduler
self.async_rollout_mode = False
if self.config.actor_rollout_ref.rollout.mode == "async_envloop":
from verl.experimental.vla.env_loop import EnvLoop
self.async_rollout_mode = True
self.async_rollout_manager = EnvLoop(
config=self.config, rollout_wg=self.actor_rollout_wg, env_wg=self.env_wg
)
def _get_gen_batch(self, batch: DataProto) -> DataProto:
# pop those keys for generation
batch_keys_to_pop = []
non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys())
gen_batch = batch.pop(
batch_keys=batch_keys_to_pop,
non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop),
)
return gen_batch
def _reset_envs(self, gen_batch: DataProto) -> asyncio.Future:
initial_state_ids = gen_batch.non_tensor_batch["state_ids"]
task_ids = gen_batch.non_tensor_batch["task_ids"]
reset_prompts = DataProto.from_dict(non_tensors={"state_ids": initial_state_ids, "task_ids": task_ids})
reset_future = self.env_wg.reset_envs_to_state_ids(reset_prompts)
return reset_future
def fit(self):
"""
The training loop of the SAC trainer.
The driver process only needs to call the compute functions of the worker groups through RPC
to construct the dataflow. Lightweight work such as reward computation and metric reduction
is done on the driver process.
"""
from omegaconf import OmegaConf
from verl.utils.tracking import Tracking
logger = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
self.global_steps = 0
# load checkpoint before doing anything
self._load_checkpoint()
# perform validation before training
# currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
val_metrics = self._validate()
assert val_metrics, f"{val_metrics=}"
pprint(f"Initial validation metrics: {val_metrics}")
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get("val_only", False):
return
# add tqdm
progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
# we start from step 1
self.global_steps += 1
last_val_metrics = None
self.max_steps_duration = 0
prev_step_profile = False
curr_step_profile = (
self.global_steps in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
next_step_profile = False
for epoch in range(self.config.trainer.total_epochs):
train_iter = iter(self.train_dataloader)
next_batch_dict = next(train_iter)
need_validate = False
dataloader_len = len(self.train_dataloader)
print(f"Starting epoch {epoch}, dataloader length: {dataloader_len}")
for step_idx in range(dataloader_len):
batch_dict = next_batch_dict
try:
next_batch_dict = next(train_iter)
except StopIteration:
next_batch_dict = None
metrics = {}
timing_raw = {}
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not prev_step_profile and curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
batch: DataProto = DataProto.from_single_dict(batch_dict)
# add uid to batch
batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch))], dtype=object)
gen_batch = self._get_gen_batch(batch)
gen_batch.meta_info["global_steps"] = self.global_steps
gen_batch.meta_info["do_sample"] = True
gen_batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
gen_batch.meta_info["prompt_length"] = self.config.actor_rollout_ref.rollout.prompt_length
gen_batch.meta_info["eos_token_id"] = self.tokenizer.eos_token_id
gen_batch.meta_info["n_samples"] = self.config.actor_rollout_ref.rollout.n
gen_batch.meta_info["pad_token_id"] = self.tokenizer.pad_token_id
gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
is_last_step = self.global_steps >= self.total_training_steps
if step_idx == 0 or need_validate:
# reset env workers in first step
# if validation ran on the previous step, the reset was skipped and needs to be done here
reset_future = self._reset_envs(gen_batch)
need_validate = (
self.val_reward_fn is not None
and self.config.trainer.test_freq > 0
and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
)
with marked_timer("step", timing_raw):
# generate a batch
with marked_timer("gen", timing_raw, color="red"):
gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch, reset_future)
# prepare for next batch's env reset
if step_idx != dataloader_len - 1 and not need_validate:
next_batch: DataProto = DataProto.from_single_dict(next_batch_dict)
next_gen_batch = self._get_gen_batch(next_batch)
next_gen_batch = next_gen_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
reset_future = self._reset_envs(next_gen_batch)
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = gen_batch_output
if "response_mask" not in batch.batch.keys():
batch.batch["response_mask"] = compute_response_mask(self.config, batch)
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn)
batch.batch["rewards"] = reward_tensor
average_reward = reward_tensor.any(-1).mean(dtype=torch.float32).item()
metrics["data/trajectory_avg_reward"] = average_reward
batch = add_transition_prefixes(batch)
batch = flatten_trajectories(batch)
# batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
batch.meta_info["global_token_num"] = [0]
# update actor
if self.config.trainer.critic_warmup <= self.global_steps:
with marked_timer("update_actor", timing_raw, color="red"):
batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable
actor_output = self.actor_rollout_wg.update_actor(batch)
actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
metrics.update(actor_output_metrics)
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
with marked_timer("dump_rollout_generations", timing_raw, color="green"):
inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()
sample_gts = [
item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None)
for item in batch
]
if "request_id" in batch.non_tensor_batch:
reward_extra_infos_dict.setdefault(
"request_id",
batch.non_tensor_batch["request_id"].tolist(),
)
self._dump_generations(
inputs=inputs,
outputs=outputs,
gts=sample_gts,
scores=scores,
reward_extra_infos_dict=reward_extra_infos_dict,
dump_path=rollout_data_dir,
)
# validate
if need_validate:
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if is_last_step:
last_val_metrics = val_metrics
metrics.update(val_metrics)
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
self._save_checkpoint()
with marked_timer("stop_profile", timing_raw):
next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
curr_step_profile and not next_step_profile
if self.config.global_profiler.profile_continuous_steps
else curr_step_profile
)
prev_step_profile = curr_step_profile
curr_step_profile = next_step_profile
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# training metrics
metrics.update(
{
"training/global_step": self.global_steps,
"training/epoch": epoch,
}
)
# collect metrics
# metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
# metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: implement actual tflpo and theoretical tflpo
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
self.train_dataloader.sampler.update(batch=batch)
# TODO: make a canonical logger that supports various backend
logger.log(data=metrics, step=self.global_steps)
progress_bar.update(1)
self.global_steps += 1
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
if is_last_step:
pprint(f"Final validation metrics: {last_val_metrics}")
progress_bar.close()
return
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
self.train_dataset.on_batch_end(batch=batch)
def _validate(self):
data_source_lst = []
reward_extra_infos_dict: dict[str, list] = defaultdict(list)
# Lists to collect samples for the table
sample_scores = []
sample_turns = []
sample_uids = []
for test_data in self.val_dataloader:
test_batch = DataProto.from_single_dict(test_data)
if len(test_batch) < self.config.data.val_batch_size:
print(f"drop last batch in val_dataloader, len {len(test_batch)}")
break
if "uid" not in test_batch.non_tensor_batch:
test_batch.non_tensor_batch["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(test_batch))], dtype=object
)
test_gen_batch = self._get_gen_batch(test_batch)
test_gen_batch.meta_info = {
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
"prompt_length": self.config.actor_rollout_ref.rollout.prompt_length,
"recompute_log_prob": False,
"do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
"temperature": self.config.actor_rollout_ref.rollout.temperature,
"n_samples": self.config.actor_rollout_ref.rollout.n,
"validate": True,
"global_steps": self.global_steps,
}
test_gen_batch = test_gen_batch.repeat(
repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
)
sample_uids.extend(test_gen_batch.non_tensor_batch["uid"])
# pad to be divisible by dp_size
size_divisor = self.config.env.train.num_envs * self.config.env.rollout.pipeline_stage_num
test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor)
reset_future = self._reset_envs(test_gen_batch_padded)
test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(
test_gen_batch_padded, reset_future
)
# unpad
test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
print("validation generation end")
test_batch = test_output_gen_batch
test_batch.meta_info["validate"] = True
# evaluate using reward_function
if self.val_reward_fn is None:
raise ValueError("val_reward_fn must be provided for validation.")
result = self.val_reward_fn(test_batch, return_dict=True)
reward_tensor = result["reward_tensor"]
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_extra_infos_dict["reward"].extend(scores)
print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}")
if "reward_extra_info" in result:
for key, lst in result["reward_extra_info"].items():
reward_extra_infos_dict[key].extend(lst)
print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}")
# collect num_turns of each prompt
if "__num_turns__" in test_batch.non_tensor_batch:
sample_turns.append(test_batch.non_tensor_batch["__num_turns__"])
data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0]))
for key_info, lst in reward_extra_infos_dict.items():
assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"
data_sources = np.concatenate(data_source_lst, axis=0)
data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict)
metric_dict = {}
for data_source, var2metric2val in data_src2var2metric2val.items():
core_var = "acc" if "acc" in var2metric2val else "reward"
for var_name, metric2val in var2metric2val.items():
n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
for metric_name, metric_val in metric2val.items():
if (
(var_name == core_var)
and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
and (f"@{n_max}" in metric_name)
):
metric_sec = "val-core"
else:
metric_sec = "val-aux"
pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
metric_dict[pfx] = metric_val
if len(sample_turns) > 0:
sample_turns = np.concatenate(sample_turns)
metric_dict["val-aux/num_turns/min"] = sample_turns.min()
metric_dict["val-aux/num_turns/max"] = sample_turns.max()
metric_dict["val-aux/num_turns/mean"] = sample_turns.mean()
return metric_dict
|
verl__experimental__vla__sac__sac_ray_trainer.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import os
import subprocess
from typing import Optional
import torch
import torch.multiprocessing as mp
from verl.utils.device import get_torch_device
logger = logging.getLogger(__name__)
def cleanup_device_tensors():
gc.collect()
get_torch_device().empty_cache()
def get_gpu_numa_node(gpu_id: int) -> int:
try:
try:
import pynvml
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
# Get PCI bus info
pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
pci_bus_id = pci_info.busId
except ImportError:
# Fallback to nvidia-smi
result = subprocess.run(
[
"nvidia-smi",
"--query-gpu=pci.bus_id",
"--format=csv,noheader,nounits",
f"--id={gpu_id}",
],
capture_output=True,
text=True,
check=True,
)
pci_bus_id = result.stdout.strip()
# Extract bus number from PCI bus ID (format: 0000:XX:YY.Z)
bus_number = pci_bus_id.split(":")[1]
# Get NUMA node from sysfs
numa_node_path = f"/sys/bus/pci/devices/0000:{bus_number}:00.0/numa_node"
if os.path.exists(numa_node_path):
with open(numa_node_path) as f:
numa_node = int(f.read().strip())
if numa_node >= 0:
return numa_node
# Fallback: try to get from lscpu
result = subprocess.run(["lscpu"], capture_output=True, text=True, check=True)
numa_nodes = 0
for line in result.stdout.split("\n"):
if "NUMA node(s):" in line:
numa_nodes = int(line.split(":")[1].strip())
break
# If we can't determine the exact NUMA node, distribute evenly
return gpu_id % numa_nodes if numa_nodes > 0 else 0
except Exception as e:
logger.error(f"Warning: Could not determine NUMA node for GPU {gpu_id}: {e}")
return 0
def get_numa_cpus(numa_node: int) -> list:
try:
# Read from sysfs
cpulist_path = f"/sys/devices/system/node/node{numa_node}/cpulist"
if os.path.exists(cpulist_path):
with open(cpulist_path) as f:
cpulist = f.read().strip()
# Parse CPU list (e.g., "0-7,16-23" or "0,1,2,3")
cpus = []
for part in cpulist.split(","):
if "-" in part:
start, end = map(int, part.split("-"))
cpus.extend(range(start, end + 1))
else:
cpus.append(int(part))
return cpus
except Exception as e:
logger.error(f"Warning: Could not get CPU list for NUMA node {numa_node}: {e}")
# Fallback: return all available CPUs
return list(range(os.cpu_count() or 1))
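# Illustrative parse (hypothetical cpulist contents): "0-7,16-23" expands to
# [0, 1, ..., 7, 16, 17, ..., 23], while "0,2,4" expands to [0, 2, 4].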
def set_process_numa_affinity(gpu_id: int) -> None:
try:
numa_node = get_gpu_numa_node(gpu_id)
cpus = get_numa_cpus(numa_node)
if not cpus:
logger.error(f"Warning: No CPUs found for NUMA node {numa_node}")
return
os.sched_setaffinity(0, cpus)
try:
subprocess.run(
["numactl", "--membind", str(numa_node), "--"],
check=False,
capture_output=True,
)
except FileNotFoundError:
pass # numactl not available, that's ok
except Exception as e:
logger.error(f"Warning: Could not set NUMA affinity for GPU {gpu_id}: {e}")
def recursive_to_own(obj):
if isinstance(obj, torch.Tensor):
return obj.clone() if obj.is_shared() else obj
elif isinstance(obj, list):
return [recursive_to_own(elem) for elem in obj]
elif isinstance(obj, tuple):
return tuple(recursive_to_own(elem) for elem in obj)
elif isinstance(obj, dict):
return {k: recursive_to_own(v) for k, v in obj.items()}
else:
return obj
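# Illustrative use (hypothetical payload): tensors received through a multiprocessing queue may
# still live in shared memory owned by the producer; recursive_to_own({"obs": t, "meta": ("ok", t)})
# clones any shared tensors it finds and passes every other value through unchanged.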
class EnvManager:
def __init__(self, cfg, rank, world_size, env_cls):
self.cfg = cfg
self.rank = rank
self.world_size = world_size
self.process: Optional[mp.Process] = None
self.command_queue: Optional[mp.Queue] = None
self.result_queue: Optional[mp.Queue] = None
self.state_buffer: Optional[bytes] = None
self.env_cls = env_cls
def start_simulator(self):
"""Start simulator process with shared memory queues"""
if self.process:
logger.info(f"Simulator process already running for rank {self.rank}")
return
self.context = mp.get_context("spawn")
# Create shared memory queues
self.command_queue = self.context.Queue()
self.result_queue = self.context.Queue()
# Start simulator process
self.process = self.context.Process(
target=_simulator_worker,
args=(
self.cfg,
self.rank,
self.world_size,
self.env_cls,
self.command_queue,
self.result_queue,
self.state_buffer,
True,
),
)
self.process.start()
# Wait for initialization
result = self.result_queue.get(timeout=180)
if result["status"] != "ready":
raise RuntimeError(f"Simulator initialization failed: {result}")
def stop_simulator(self):
if not self.process:
return
# Request state save
self.command_queue.put({"method": "get_state", "args": [], "kwargs": {}})
# Get saved state
result = self.result_queue.get(timeout=180)
if result["status"] == "success":
self.state_buffer = result["data"]
self.command_queue.put({"method": "shutdown"})
self.command_queue.close()
self.result_queue.close()
self.command_queue = None
self.result_queue = None
self.process.join(timeout=5)
if self.process.is_alive():
self.process.terminate()
self.process.join()
self.process = None
def __getattr__(self, name):
if name in [
"cfg",
"rank",
"world_size",
"process",
"command_queue",
"result_queue",
"state_buffer",
"env_cls",
"context",
]:
raise AttributeError(name)  # internal attributes fall back to the normal missing-attribute error
def method_proxy(*args, **kwargs):
if self.process is None or not self.process.is_alive():
raise RuntimeError("Simulator not running")
args = recursive_to_own(args)
kwargs = recursive_to_own(kwargs)
self.command_queue.put({"method": name, "args": args, "kwargs": kwargs})
result = self.result_queue.get()
result = recursive_to_own(result)
if result["status"] == "error":
raise Exception(result["error"])
return result["data"]
return method_proxy
def get_all_state_ids(self):
"""Get all available state IDs from the environment."""
if self.process is None or not self.process.is_alive():
raise RuntimeError("Simulator not running")
self.command_queue.put({"method": "get_all_state_ids", "args": [], "kwargs": {}})
result = self.result_queue.get()
result = recursive_to_own(result)
if result["status"] == "error":
raise Exception(result["error"])
return result["data"]
def reset_envs_to_state_ids(self, state_ids_list, task_ids_list):
"""Reset environments to specified state IDs."""
if self.process is None or not self.process.is_alive():
raise RuntimeError("Simulator not running")
state_ids_list = recursive_to_own(state_ids_list)
task_ids_list = recursive_to_own(task_ids_list)
self.command_queue.put(
{
"method": "reset_envs_to_state_ids",
"args": [state_ids_list, task_ids_list],
"kwargs": {},
}
)
result = self.result_queue.get()
result = recursive_to_own(result)
if result["status"] == "error":
raise Exception(result["error"])
return result["data"]
def __setattr__(self, name, value):
# Handle special attributes that should be set on self
if name in [
"cfg",
"rank",
"world_size",
"process",
"command_queue",
"result_queue",
"state_buffer",
"env_cls",
"context",
]:
super().__setattr__(name, value)
return
if self.process is None or not self.process.is_alive():
raise RuntimeError(f"Simulator not running to set attribute {name} to {value}")
value = recursive_to_own(value)
self.command_queue.put(
{
"method": "__setattr__",
"args": [name, value],
"kwargs": {},
}
)
result = self.result_queue.get()
result = recursive_to_own(result)
if result["status"] == "error":
raise Exception(result["error"])
def _simulator_worker(
cfg,
rank,
world_size,
env_cls,
command_queue,
result_queue,
state_buffer,
bind_numa=True,
):
"""Worker process for simulator"""
# Set NUMA affinity for the process to match the GPU rank
import logging
import os
pid = os.getpid()
logger = logging.getLogger(f"simulator_worker_{rank}_{pid}")
if bind_numa:
set_process_numa_affinity(rank)
try:
env = env_cls(cfg, rank, world_size)
if state_buffer:
env.load_state(state_buffer)
# Signal ready
result_queue.put({"status": "ready"})
# Main command processing loop
while True:
try:
command = command_queue.get()
logger.debug(f"Received command method: {command['method']}")
if command["method"] == "shutdown":
env.close()
break
method_name = command["method"]
args = command.get("args", [])
kwargs = command.get("kwargs", {})
if method_name == "__setattr__":
# Handle attribute setting
attr_name, attr_value = args
setattr(env, attr_name, attr_value)
result_queue.put({"status": "success", "data": None})
elif hasattr(env, method_name):
method = getattr(env, method_name)
assert callable(method), f"Method {method_name} is not callable"
result = method(*args, **kwargs)
result_queue.put({"status": "success", "data": result})
else:
logger.error(f"Method '{method_name}' not found")
result_queue.put(
{
"status": "error",
"error": f"Method '{method_name}' not found",
}
)
except Exception as e:
logger.exception(e)
result_queue.put({"status": "error", "error": str(e)})
except Exception as e:
logger.exception(e)
result_queue.put({"status": "error", "error": str(e)})
finally:
command_queue.close()
result_queue.close()
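# Illustrative usage sketch (hypothetical `cfg`, env class, and `actions`; the EnvWorker module in
# this package does the real wiring):
#
#   manager = EnvManager(cfg, rank=0, world_size=1, env_cls=LiberoEnv)
#   manager.start_simulator()                          # spawns _simulator_worker, waits for "ready"
#   obs = manager.reset_envs_to_state_ids([0], [0])
#   step_out = manager.chunk_step(actions)             # proxied via the command/result queues
#   manager.stop_simulator()                           # snapshots env state, then joins the process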
|
verl__experimental__vla__workers__env__env_manager.py
|
# Copyright 2025 The RLinf Authors.
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
from omegaconf import DictConfig
from torch.distributed.device_mesh import init_device_mesh
from verl import DataProto
from verl.experimental.vla.workers.env.env_manager import EnvManager
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import (
get_device_name,
)
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.profiler import DistProfiler, DistProfilerExtension, ProfilerConfig
def put_tensor_cpu(data_dict):
for key, value in data_dict.items():
if isinstance(value, dict):
data_dict[key] = put_tensor_cpu(value)
if isinstance(value, torch.Tensor):
data_dict[key] = value.cpu().contiguous()
return data_dict
def create_env_batch(obs, rews, dones, infos, meta=None):
ret_dict = {"obs": obs, "rews": rews, "dones": dones, "infos": infos}
if meta is not None:
ret_dict.update(meta=meta)
ret_dict = put_tensor_cpu(ret_dict)
return ret_dict
def create_env_batch_dataproto(obs, rews, terminations, truncations, infos, meta=None):
ret_dict = {"obs": obs, "rews": rews, "terminations": terminations, "truncations": truncations, "infos": infos}
if meta is not None:
ret_dict.update(meta=meta)
ret_dict = put_tensor_cpu(ret_dict)
tensor_batch = {
"full_image": ret_dict["obs"]["images_and_states"]["full_image"],
"wrist_image": ret_dict["obs"]["images_and_states"]["wrist_image"],
"state": ret_dict["obs"]["images_and_states"]["state"],
"rews": ret_dict["rews"],
"terminations": ret_dict["terminations"],
"truncations": ret_dict["truncations"],
}
non_tensor_batch = {"task_descriptions": obs["task_descriptions"]}
output = DataProto.from_dict(tensors=tensor_batch, non_tensors=non_tensor_batch)
return output
class EnvWorker(Worker, DistProfilerExtension):
def __init__(self, config: DictConfig):
Worker.__init__(self)
self.cfg = config
self.train_video_cnt = 0
self.eval_video_cnt = 0
self.simulator_list = []
self.last_obs_list = []
self.last_dones_list = []
self.eval_simulator_list = []
self.stage_num = self.cfg.rollout.pipeline_stage_num
initialize_global_process_group_ray(timeout_second=None)
device_name = get_device_name()
env_device_mesh = init_device_mesh(device_name, mesh_shape=(self.world_size, 1), mesh_dim_names=["dp", "tp"])
self._register_dispatch_collect_info("env", dp_rank=env_device_mesh["dp"].get_local_rank(), is_collect=True)
# Initialize profiler
omega_profiler_config = config.train.get("profiler", {})
profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
tool_config = omega_conf_to_dataclass(
omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
)
else:
tool_config = None
DistProfilerExtension.__init__(
self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
@DistProfiler.annotate(color="green", role="env_init")
def init_worker(self):
if self.cfg.train.simulator_type == "libero":
from verl.experimental.vla.envs.libero_env.libero_env import LiberoEnv
for _ in range(self.stage_num):
self.simulator_list.append(
EnvManager(
self.cfg.train,
rank=self._rank,
world_size=self._world_size,
env_cls=LiberoEnv,
)
)
elif self.cfg.train.simulator_type == "isaac":
from verl.experimental.vla.envs.isaac_env.isaac_env import IsaacEnv
for _ in range(self.stage_num):
self.simulator_list.append(
EnvManager(
self.cfg.train,
rank=self._rank,
world_size=self._world_size,
env_cls=IsaacEnv,
)
)
else:
raise NotImplementedError(f"Simulator type {self.cfg.train.simulator_type} not implemented")
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
@DistProfiler.annotate(color="green", role="env_init_simulator")
def init_simulator(self):
for i in range(self.stage_num):
self.simulator_list[i].start_simulator()
return
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="env"), blocking=False)
@DistProfiler.annotate(color="red", role="env_interact_step")
def env_interact_step(self, data: DataProto) -> dict:
"""
Step the simulator at the given pipeline stage with a chunk of actions and return the resulting env batch.
"""
chunk_actions: torch.Tensor = data.non_tensor_batch["actions"]
stage_id: int = data.meta_info["stage_id"]
# Action pre-processing is not required for Pi0.5 on Libero
# TODO: prepare actions according to simulator type
# chunk_actions = prepare_actions(
# simulator_type=self.cfg.train.simulator_type,
# raw_chunk_actions=chunk_actions,
# num_action_chunks=self.cfg.actor.model.num_action_chunks,
# action_dim=self.cfg.actor.model.action_dim,
# )
env_info_list = {}
extracted_obs, chunk_rewards, chunk_terminations, chunk_truncations, infos = self.simulator_list[
stage_id
].chunk_step(chunk_actions)
chunk_dones = torch.logical_or(chunk_terminations, chunk_truncations)
if chunk_dones.any():
if "final_info" in infos:
final_info = infos["final_info"]
for key in final_info["episode"]:
env_info_list[key] = final_info["episode"][key][chunk_dones[:, -1]].cpu()
env_batch = create_env_batch_dataproto(
obs=extracted_obs,
rews=chunk_rewards,
terminations=chunk_terminations,
truncations=chunk_truncations,
infos=infos,
meta=env_info_list,
)
return env_batch
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get_all_state_ids(self):
"""Get all available state IDs from the environment."""
state_ids = self.simulator_list[0].get_all_state_ids()
return state_ids
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="env"), blocking=False)
@DistProfiler.annotate(color="blue", role="env_reset_envs_to_state_ids")
def reset_envs_to_state_ids(self, data: DataProto):
"""Reset environments to specified state IDs.
Args:
state_ids: State IDs to reset environments to
"""
state_ids_list = list(data.non_tensor_batch["state_ids"])
task_ids_list = list(data.non_tensor_batch["task_ids"])
assert len(state_ids_list) == self.cfg.train.num_envs * self.stage_num, (
f"state_ids_list length is {len(state_ids_list)}, but should be {self.cfg.train.num_envs * self.stage_num}"
)
result_list = []
for stage_id in range(self.stage_num):
if self.cfg.train.simulator_type == "isaac":
assert (
len(
set(
state_ids_list[
stage_id * self.cfg.train.num_envs : (stage_id + 1) * self.cfg.train.num_envs
]
)
)
== 1
), "rollout.n should equal to num_envs for isaac"
result = self.simulator_list[stage_id].reset_envs_to_state_ids(
state_ids_list[stage_id * self.cfg.train.num_envs : (stage_id + 1) * self.cfg.train.num_envs],
task_ids_list[stage_id * self.cfg.train.num_envs : (stage_id + 1) * self.cfg.train.num_envs],
)
result_list.append(result)
output_tensor_dict = {}
output_non_tensor_dict = {}
# Handle nested 'images_and_states'
images_and_states_list = [d[0]["images_and_states"] for d in result_list]
if images_and_states_list:
# Assuming all dicts in the list have the same keys
for k in images_and_states_list[0].keys():
if isinstance(images_and_states_list[0][k], torch.Tensor):
output_tensor_dict[k] = torch.cat([d[k] for d in images_and_states_list])
# Handle 'task_descriptions'
task_descriptions_list = [d[0]["task_descriptions"] for d in result_list]
output_non_tensor_dict["task_descriptions"] = list(itertools.chain.from_iterable(task_descriptions_list))
output = DataProto.from_dict(tensors=output_tensor_dict, non_tensors=output_non_tensor_dict)
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
@DistProfiler.annotate(color="gray", role="env_finish_rollout")
def finish_rollout(self, mode="train"):
# reset
if mode == "train":
if self.cfg.train.video_cfg.save_video:
for i in range(self.stage_num):
self.simulator_list[i].flush_video(video_sub_dir=f"stage_{i}")
|
verl__experimental__vla__workers__env__env_worker.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
from uuid import uuid4
class BaseInteraction:
def __init__(self, config: dict[str, Any]):
self.config = config
self.name: str = config.get("name", "interaction_agent")  # default role name for a generic interaction agent
async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str:
"""Create a tool instance.
Args:
instance_id: The instance id of the tool.
Returns:
The instance id of the tool.
"""
if instance_id is None:
return str(uuid4())
else:
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict[str, Any]]:  # generate the assistant response for the current turn
"""
Generates a response for the current turn of interaction.
Returns a tuple containing:
- should_terminate_sequence (bool): True if the interaction sequence should end.
- response_content (str): The textual content of the response.
- current_turn_score (float): The score for this specific turn/response.
- additional_data (dict): Any extra information or metadata.
"""
should_terminate_sequence: bool = False # if True, end rollout
response_content: str = "Your current result seems acceptable."
current_turn_score: float = 0.8
additional_data: dict[str, Any] = {}
return should_terminate_sequence, response_content, current_turn_score, additional_data
async def calculate_score(self) -> float:  # turn-level score calculation
"""
Calculates a score for the interaction,
potentially considering aspects like partial exposure and in-context task switching.
Should be invoked at the turn level.
"""
# ...implement the logic to calculate turn-level score...
score = 0.0
return score
async def finalize_interaction(self) -> None:  # end the interaction and release resources
"""
Finalizes the interaction session and releases any associated state or resources.
Simulates: release state
"""
# ...implement the logic to release state...
pass
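# --- Hedged example (not part of verl): a minimal BaseInteraction subclass. ---
# It illustrates the contract of generate_response: return (should_terminate,
# response_text, turn_score, extra_info). The class name `_EchoInteraction` is hypothetical.
class _EchoInteraction(BaseInteraction):
    """Sketch: echo the last user message and terminate after a single turn."""

    async def generate_response(
        self, instance_id: str, messages: list[dict[str, Any]], **kwargs
    ) -> tuple[bool, str, float, dict[str, Any]]:
        last_user = next((m.get("content", "") for m in reversed(messages) if m.get("role") == "user"), "")
        # Terminate immediately with a neutral score; real subclasses grade the answer instead.
        return True, f"Echo: {last_user}", 0.0, {}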
|
verl__interactions__base.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.utils.reward_score import gsm8k
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class Gsm8kInteraction(BaseInteraction):
"""A demo interaction for calculating the reward of gsm8k.
- `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = ""
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "assistant":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Your response is correct!"
should_terminate_sequence = True
else:
response = "Your response is incorrect! You need to reflect on your answer and try again."
should_terminate_sequence = False
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
return gsm8k.compute_score(
self._instance_dict[instance_id]["response"],
self._instance_dict[instance_id]["ground_truth"],
method="strict",
format_score=0.0,
score=1.0,
)
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
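# --- Hedged example (not part of verl): driving Gsm8kInteraction by hand. ---
# The ground truth, the message content, and the "#### <answer>" format assumed to be
# expected by the strict gsm8k scorer are illustrative assumptions, not shipped values.
async def _example_gsm8k_round() -> tuple[bool, str, float]:
    interaction = Gsm8kInteraction({"name": "gsm8k"})
    instance_id = await interaction.start_interaction(ground_truth="72")
    messages = [{"role": "assistant", "content": "8 * 9 = 72, so the answer is #### 72"}]
    done, reply, reward, _ = await interaction.generate_response(instance_id, messages)
    await interaction.finalize_interaction(instance_id)
    return done, reply, reward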
|
verl__interactions__gsm8k_interaction.py
|
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import logging
import os
import sys
from omegaconf import OmegaConf
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_interaction_class(cls_name):
"""Dynamically import and return the interaction class."""
module_name, class_name = cls_name.rsplit(".", 1)
if module_name not in sys.modules:
spec = importlib.util.find_spec(module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
else:
module = sys.modules[module_name]
interaction_cls = getattr(module, class_name)
return interaction_cls
def initialize_interactions_from_config(interaction_config_file):
"""Initialize interactions from configuration file.
Args:
interaction_config_file: Path to the interaction configuration file.
Returns:
dict: A dictionary mapping interaction names to BaseInteraction instances.
"""
interaction_config = OmegaConf.load(interaction_config_file)
interaction_map = {}
for interaction_item in interaction_config.interaction:
cls_name = interaction_item.class_name
interaction_cls = get_interaction_class(cls_name)
# Extract config and name
config = OmegaConf.to_container(interaction_item.config, resolve=True)
# Get the interaction name - either from config or derive from class name
name = interaction_item.get("name", None)
if name is None:
# If no name is specified, use the class name as default
class_simple_name = cls_name.split(".")[-1]
# Remove "Interaction" suffix if present, otherwise use full class name
if class_simple_name.endswith("Interaction"):
name = class_simple_name[:-11].lower() # Remove "Interaction" (11 chars)
else:
name = class_simple_name.lower()
# Check for duplicate names
if name in interaction_map:
raise ValueError(f"Duplicate interaction name '{name}' found. Each interaction must have a unique name.")
# Inject the name into the config
config["name"] = name
# Create the interaction instance
interaction = interaction_cls(config=config)
interaction_map[name] = interaction
logger.info(f"Initialized interaction '{name}' with class '{cls_name}'")
return interaction_map
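# --- Hedged example (not part of verl): the config layout this loader expects. ---
# Each item under `interaction` needs a `class_name` and a `config` mapping; `name`
# is optional and otherwise derived from the class name. The YAML below and the
# temporary path used by the helper are illustrative assumptions.
_EXAMPLE_INTERACTION_CONFIG_YAML = """\
interaction:
  - name: gsm8k
    class_name: verl.interactions.gsm8k_interaction.Gsm8kInteraction
    config: {}
"""
def _example_load_interactions(config_path: str = "/tmp/interaction_config.yaml"):
    """Write the example config to disk and build the name -> BaseInteraction map."""
    with open(config_path, "w") as f:
        f.write(_EXAMPLE_INTERACTION_CONFIG_YAML)
    return initialize_interactions_from_config(config_path)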
|
verl__interactions__utils__interaction_registry.py
|
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class WeatherInteraction(BaseInteraction):
"""A demo interaction for handling weather-related queries.
- `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = "no tool call"
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "tool":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Thank you for your weather query!"
should_terminate_sequence = True
else:
response = "Please use the weather tool to get the weather information."
should_terminate_sequence = True
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
# For the weather interaction, a more complex scoring logic could be implemented.
# For now, return 1.0 if a tool response was observed and 0.0 otherwise.
if self._instance_dict[instance_id]["response"] == "no tool call":
return 0.0
return 1.0
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
|
verl__interactions__weather_interaction.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module merges verl checkpoints from the FSDP and Megatron backends into HuggingFace format and can also test the merged models.
To merge FSDP checkpoints:
```sh
python -m verl.model_merger merge \
--backend fsdp \
--local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
To merge Megatron checkpoints:
```sh
python -m verl.model_merger merge \
--backend megatron \
--tie-word-embedding \
--local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
Or use distributed merge for large models such as DeepseekV3 671B:
```sh
torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge\
--backend megatron \
--local_dir ./checkpoints/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
For more details, please refer to documentation:
https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model
"""
from .base_model_merger import generate_config_from_args, parse_args
def main():
args = parse_args()
config = generate_config_from_args(args)
print(f"config: {config}")
if config.backend == "fsdp":
from .fsdp_model_merger import FSDPModelMerger
merger = FSDPModelMerger(config)
elif config.backend == "megatron":
from .megatron_model_merger import MegatronModelMerger
merger = MegatronModelMerger(config)
else:
raise NotImplementedError(f"Unknown backend: {config.backend}")
merger.merge_and_save()
merger.cleanup()
if __name__ == "__main__":
main()
|
verl__model_merger____main__.py
|
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import init_empty_weights
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForTokenClassification,
GenerationConfig,
)
from verl.utils import hf_processor, hf_tokenizer
def parse_args():
parser = argparse.ArgumentParser(description="verl model merger")
subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.")
base_op_parser = argparse.ArgumentParser(add_help=False)
base_op_parser.add_argument(
"--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model"
)
base_op_parser.add_argument("--local_dir", type=str, default=None, help="Path to the saved model checkpoints.")
base_op_parser.add_argument(
"--tie-word-embedding",
action="store_true",
help="Whether to tie word embedding weights (currently only Megatron supported)",
)
base_op_parser.add_argument("--trust-remote-code", action="store_true", help="Whether to trust remote code")
base_op_parser.add_argument(
"--is-value-model",
action="store_true",
help="Whether the model is a value model (currently only Megatron supported)",
)
base_op_parser.add_argument(
"--use_cpu_initialization",
action="store_true",
help="Whether to use CPU initialization for the model. This is useful for large models that cannot "
"fit into GPU memory during initialization.",
)
merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.")
merge_parser.add_argument(
"--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model"
)
merge_parser.add_argument(
"--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model"
)
merge_parser.add_argument(
"--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository"
)
test_parser = subparsers.add_parser(
"test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model"
)
test_parser.add_argument(
"--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing"
)
args = parser.parse_args()
return args
@dataclass
class ModelMergerConfig:
"""Configuration for model merger operations.
Args:
operation (str): Operation type - 'merge' or 'test'.
backend (str): Backend type for the model ('fsdp' or 'megatron').
target_dir (Optional[str]): Directory to save the merged huggingface model. Defaults to "tmp".
hf_upload_path (Optional[str]): Hugging Face repository ID to upload the model. Defaults to None.
private (bool): Whether to upload the model to a private Hugging Face repository. Defaults to False.
test_hf_dir (Optional[str]): Path to the reference Hugging Face model directory for testing. Defaults to None.
tie_word_embedding (bool): Whether to tie word embedding weights (currently only Megatron
supported). Defaults to False.
trust_remote_code (bool): Whether to trust remote code. Defaults to False.
is_value_model (bool): Whether the model is a value model (currently only Megatron
supported). Defaults to False.
local_dir (Optional[str]): Path to the saved model checkpoints. Defaults to None.
hf_model_config_path (Optional[str]): Path to HuggingFace model configuration files. Defaults to None.
hf_upload (bool): Whether to upload to HuggingFace (computed automatically). Not for initialization.
use_cpu_initialization (bool): Whether to use CPU initialization for large models. Defaults to False.
"""
operation: str # 'merge' or 'test'
backend: str
target_dir: Optional[str] = "tmp"
hf_upload_path: Optional[str] = None
private: bool = False
test_hf_dir: Optional[str] = None
tie_word_embedding: bool = False
trust_remote_code: bool = False
is_value_model: bool = False
local_dir: Optional[str] = None
hf_model_config_path: Optional[str] = None
hf_upload: bool = field(init=False)
use_cpu_initialization: bool = False
def __post_init__(self):
self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path)
if self.operation == "test":
self.target_dir = None
self.hf_upload_path = None
self.private = False
def generate_config_from_args(args: argparse.Namespace) -> ModelMergerConfig:
common_config_args = {
"operation": args.operation,
"backend": args.backend,
"tie_word_embedding": args.tie_word_embedding,
"trust_remote_code": args.trust_remote_code,
"is_value_model": args.is_value_model,
"local_dir": args.local_dir,
"hf_model_config_path": os.path.join(args.local_dir, "huggingface"),
"use_cpu_initialization": args.use_cpu_initialization,
}
if args.operation == "merge":
config = ModelMergerConfig(
**common_config_args,
target_dir=args.target_dir,
hf_upload_path=args.hf_upload_path,
private=args.private,
test_hf_dir=None,
)
os.makedirs(config.target_dir, exist_ok=True)
elif args.operation == "test":
config = ModelMergerConfig(
**common_config_args,
test_hf_dir=args.test_hf_dir,
# the following args are not used by test operation
target_dir=None,
hf_upload_path=None,
private=False,
)
else:
raise NotImplementedError(f"Unknown operation: {args.operation}")
return config
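# --- Hedged example (not part of verl's CLI path): building the config in code. ---
# All paths below are hypothetical placeholders; the CLI normally derives
# `hf_model_config_path` as `<local_dir>/huggingface`.
def _example_merge_config() -> ModelMergerConfig:
    return ModelMergerConfig(
        operation="merge",
        backend="fsdp",
        local_dir="checkpoints/global_step_1/actor",
        hf_model_config_path="checkpoints/global_step_1/actor/huggingface",
        target_dir="/path/to/merged_hf_model",
    )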
class BaseModelMerger(ABC):
"""
Abstract base class for merging distributed model checkpoints into HuggingFace format.
This class provides common functionality for converting model checkpoints from different
distributed training backends (FSDP, Megatron) into standard HuggingFace format that
can be easily loaded and used for inference or further training.
The merger supports two main operations:
- merge: Convert and save checkpoints to HuggingFace format
- test: Validate merged checkpoints against a reference model
Args:
config (ModelMergerConfig): Configuration object containing paths, backend type,
and operation parameters.
Attributes:
config (ModelMergerConfig): The configuration object passed during initialization.
hf_model_config_path (str): Path to the HuggingFace model configuration files.
model_config (PretrainedConfig): Loaded HuggingFace model configuration.
"""
def __init__(self, config: ModelMergerConfig):
self.config = config
self.hf_model_config_path = config.hf_model_config_path
self.model_config = AutoConfig.from_pretrained(
self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code
)
def get_transformers_auto_model_class(self):
has_remote_code = hasattr(self.model_config, "auto_map") and any(
self.model_config.architectures[0] in val for val in self.model_config.auto_map.values()
)
if has_remote_code:
auto_class = next(
k for k, v in self.model_config.auto_map.items() if self.model_config.architectures[0] in v
)
match auto_class:
case "AutoModelForCausalLM":
return AutoModelForCausalLM
case "AutoModelForTokenClassification":
return AutoModelForTokenClassification
case "AutoModelForVision2Seq":
# Handle different transformers versions for Vision2Seq models
import transformers
from packaging import version
if version.parse(transformers.__version__) >= version.parse("4.54.0"):
# transformers >= 4.54.0 uses AutoModelForImageTextToText
from transformers import AutoModelForImageTextToText
return AutoModelForImageTextToText
else:
# transformers < 4.54.0 uses AutoModelForVision2Seq
from transformers import AutoModelForVision2Seq
return AutoModelForVision2Seq
case _:
raise NotImplementedError(f"Unknown auto class {auto_class}")
else:
if "ForTokenClassification" in self.model_config.architectures[0]:
return AutoModelForTokenClassification
elif "ForCausalLM" in self.model_config.architectures[0]:
return AutoModelForCausalLM
elif "ForConditionalGeneration" in self.model_config.architectures[0]:
return AutoModelForVision2Seq
raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}")
def patch_model_generation_config(self, model):
"""
The generation_config created from the model config may differ from the pretrained model's,
which may lead to errors when generating: https://github.com/volcengine/verl/issues/1246
This function replaces it with the generation_config shipped with the pretrained model, when available.
"""
if model.can_generate():
try:
model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path)
except OSError:
print(
f"Warning: Generation config file not found in {self.hf_model_config_path}, using a "
f"generation config created from the model config."
)
return model
def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]):
"""
Save the LoRA adapter to safetensors.
Returns:
lora_path: str, the path to the LoRA adapter, or None if no LoRA adapter is found.
Note:
This function changes `state_dict` in place.
"""
lora_params_names = [name for name in state_dict.keys() if "lora_" in name]
if len(lora_params_names) == 0:
return None
import json
from typing import OrderedDict
import peft
from safetensors.torch import save_file
lora_params = OrderedDict()
target_modules = set()
lora_key = None
for name in lora_params_names:
lora_key = name.replace(".default.weight", ".weight")
target_modules.add(lora_key.split(".")[-3])
lora_params[lora_key] = state_dict.pop(name)
lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1])
peft_dict = {
"r": lora_rank,
"lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually.
"target_modules": list(target_modules),
}
peft_config = peft.LoraConfig(**peft_dict).to_dict()
peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None
peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None
peft_config["target_modules"] = list(peft_config["target_modules"])
lora_path = os.path.join(self.config.target_dir, "lora_adapter")
os.makedirs(lora_path, exist_ok=True)
with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f:
json.dump(peft_config, f, ensure_ascii=False, indent=4)
save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors"))
for name in list(state_dict.keys()):
key = (
name.replace("base_model.model.", "")
.replace(".base_layer.weight", ".weight")
.replace(".base_layer.bias", ".bias")
)
state_dict[key] = state_dict.pop(name)
return lora_path
def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
with init_empty_weights():
model = auto_model_class.from_config(
self.model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.config.trust_remote_code
)
model.to_empty(device="cpu")
model = self.patch_model_generation_config(model)
lora_path = self.save_lora_adapter(state_dict)
if lora_path:
print(f"Saving lora adapter to {lora_path}")
print(f"Saving model to {self.config.target_dir}")
model.save_pretrained(self.config.target_dir, state_dict=state_dict)
del state_dict
del model
processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def upload_to_huggingface(self):
import requests
from huggingface_hub import HfApi
from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError
api = HfApi()
try:
# Attempt to create repository
api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True)
except HfHubHTTPError as e:
# Handle authentication/API errors
if e.response.status_code == 401:
raise PermissionError(
"Hugging Face authentication failed. Verify your token is valid and has write permissions."
) from e
elif e.response.status_code == 404:
raise RepositoryNotFoundError(f"Repository path not found: {self.config.hf_upload_path}") from e
else:
raise ConnectionError(f"Failed to create repository ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network connection failed. Check your internet connection.") from e
try:
# Attempt folder upload
api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model")
except HfHubHTTPError as e:
if e.response.status_code == 401:
raise PermissionError("Authentication failed during upload. Token may have expired.") from e
else:
raise RuntimeError(f"Upload failed ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network interruption during upload. Try again with stable connection.") from e
except OSError as e:
raise FileNotFoundError(f"Local folder error: {self.config.target_dir} - {str(e)}") from e
except Exception as e:
raise RuntimeError(f"Unexpected error during upload: {str(e)}") from e
@abstractmethod
def merge_and_save(self):
raise NotImplementedError("Subclasses should implement this method")
@abstractmethod
def cleanup(self):
raise NotImplementedError("Subclasses should implement this method to clean up resources if needed")
|
verl__model_merger__base_model_merger.py
|