init param

游雁 2024-02-28 20:24:32 +08:00
parent 811ebea5b0
commit a7532ceca9
4 changed files with 11 additions and 7 deletions

View File

@@ -193,7 +193,7 @@ class AutoModel:
                 path=init_param,
                 ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                 oss_bucket=kwargs.get("oss_bucket", None),
-                scope_map=kwargs.get("scope_map", "module.,None"),
+                scope_map=kwargs.get("scope_map", []),
                 excludes=kwargs.get("excludes", None),
             )
         else:

View File

@@ -105,7 +105,7 @@ def main(**kwargs):
                 path=p,
                 ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
                 oss_bucket=kwargs.get("oss_bucket", None),
-                scope_map=kwargs.get("scope_map", "module.,none"),
+                scope_map=kwargs.get("scope_map", []),
                 excludes=kwargs.get("excludes", None),
             )
         else:
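Note: both call sites above (AutoModel and the training entry point) change the scope_map default from a comma-separated string to an empty list. The string form is still accepted, because load_pretrained_model splits it on "," before appending a fallback pair (see the last file below). A minimal sketch of how the two defaults normalize; normalize() is a hypothetical helper that mirrors only what the hunks show:

# Sketch only: how the old and new defaults resolve inside the loader.
old_default = "module.,None"   # previous string default at the call sites
new_default = []               # new default: defer entirely to the loader

def normalize(scope_map):
    if isinstance(scope_map, str):
        scope_map = scope_map.split(",")
    scope_map += ["module.", "None"]   # fallback pair appended unconditionally
    return scope_map

print(normalize(old_default))  # ['module.', 'None', 'module.', 'None']
print(normalize(new_default))  # ['module.', 'None']

With the fallback pair always appended, the old string default was redundant, which is presumably why the call sites now pass an empty list.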

View File

@@ -315,7 +315,7 @@ class LLMASRNAR(nn.Module):
         model_outputs = self.llm(inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=None)
         preds = torch.argmax(model_outputs.logits, -1)
         text = tokenizer.batch_decode(preds, add_special_tokens=False, skip_special_tokens=True)
-        text = text.split(': "\n')[-1]
+        text = text.split(': \n')[-1]
         # preds = torch.argmax(model_outputs.logits, -1)
         ibest_writer = None
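The only change in this hunk is the split delimiter: the stray double quote is dropped. A minimal illustration with a made-up decoded string; the real prompt format comes from the model and is not shown in the diff, so only the delimiter behaviour is being illustrated:

# Hypothetical decoded output, for illustrating the delimiter change only.
decoded = 'Transcribe the audio: \nhello world'
print(decoded.split(': "\n')[-1])  # old delimiter: no match, returns the whole string
print(decoded.split(': \n')[-1])   # new delimiter: 'hello world'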

View File

@@ -45,7 +45,7 @@ def load_pretrained_model(
     ignore_init_mismatch: bool=True,
     map_location: str = "cpu",
     oss_bucket=None,
-    scope_map="module.:none",
+    scope_map=[],
     excludes=None,
     ignore_mismatch=False,
     **kwargs,
@@ -75,6 +75,7 @@ def load_pretrained_model(
     if isinstance(scope_map, str):
         scope_map = scope_map.split(",")
+    scope_map += ["module.", "None"]
     for k in dst_state.keys():
@@ -87,8 +88,11 @@ def load_pretrained_model(
             src_prefix = scope_map[i] if scope_map[i].lower() != "none" else ""
             dst_prefix = scope_map[i+1] if scope_map[i+1].lower() != "none" else ""
-            if k.startswith(dst_prefix) and k.replace(dst_prefix, src_prefix) in src_state.keys():
-                k_src = k.replace(dst_prefix, src_prefix)
+            if dst_prefix == "" and (src_prefix + k) in src_state.keys():
+                k_src = src_prefix + k
+                print(f"init param, map: {k} from {k_src} in ckpt")
+            elif k.startswith(dst_prefix) and k.replace(dst_prefix, src_prefix, 1) in src_state.keys():
+                k_src = k.replace(dst_prefix, src_prefix, 1)
                 print(f"init param, map: {k} from {k_src} in ckpt")
             if k_src in src_state.keys():
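Taken together: load_pretrained_model now always appends the fallback pair ["module.", "None"] after normalizing scope_map, handles an empty destination prefix by prepending the source prefix (str.replace with an empty old string would insert the prefix between every character, which is presumably why this case is split out), and limits the prefix rewrite to the first occurrence via count=1. A minimal sketch of the new matching, assuming plain dict state_dicts and a pair-wise walk over scope_map (the surrounding loop is not visible in the hunk); key names and values are illustrative only:

# Sketch of the new mapping logic, under the assumptions stated above.
src_state = {"module.encoder.weight": "ckpt tensor"}   # checkpoint keys carry "module."
dst_state = {"encoder.weight": "model tensor"}         # model keys do not

scope_map = []                       # new default from the call sites
if isinstance(scope_map, str):
    scope_map = scope_map.split(",")
scope_map += ["module.", "None"]     # fallback pair: src prefix "module." -> no dst prefix

for k in dst_state:
    k_src = k
    for i in range(0, len(scope_map), 2):            # assumed pair-wise walk
        src_prefix = scope_map[i] if scope_map[i].lower() != "none" else ""
        dst_prefix = scope_map[i + 1] if scope_map[i + 1].lower() != "none" else ""
        if dst_prefix == "" and (src_prefix + k) in src_state:
            # empty destination prefix: prepend instead of str.replace("")
            k_src = src_prefix + k
            print(f"init param, map: {k} from {k_src} in ckpt")
        elif k.startswith(dst_prefix) and k.replace(dst_prefix, src_prefix, 1) in src_state:
            # count=1 so only the leading prefix is rewritten
            k_src = k.replace(dst_prefix, src_prefix, 1)
            print(f"init param, map: {k} from {k_src} in ckpt")
    if k_src in src_state:
        pass  # the real function copies src_state[k_src] into dst_state[k]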