[{"data":1,"prerenderedAt":413},["ShallowReactive",2],{"content-/zh/advanced-tutorial/controlnet-workflow":3},{"id":4,"title":5,"body":6,"description":406,"extension":407,"meta":408,"navigation":362,"path":409,"seo":410,"stem":411,"__hash__":412},"content/zh/advanced-tutorial/controlnet-workflow.md","ControlNet 完全精通：从基础原理到 Flux & Turbo 最新方案",{"type":7,"value":8,"toc":390},"minimark",[9,13,30,33,38,49,53,64,114,116,120,126,176,178,182,190,230,232,236,247,251,258,276,280,287,305,307,311,314,345,347,351],[10,11,5],"h1",{"id":12},"controlnet-完全精通从基础原理到-flux-turbo-最新方案",[14,15,16,17,21,22,25,26,29],"p",{},"ControlNet 是一种神经网络架构，通过",[18,19,20],"strong",{},"添加额外控制条件","来约束扩散模型。提示词负责告诉 AI ",[18,23,24],{},"画什么","，而 ControlNet 负责告诉 AI ",[18,27,28],{},"在哪里画、怎么画","。",[31,32],"hr",{},[34,35,37],"h2",{"id":36},"_1-核心概念空间控制-vs-语义控制","1. 核心概念：空间控制 vs 语义控制",[14,39,40,41,44,45,48],{},"普通提示词（CLIP）提供的是",[18,42,43],{},"语义控制","，负责图像的“内容与含义”。\nControlNet 提供的是",[18,46,47],{},"空间控制","，负责图像的结构、深度、姿态与边界。",[50,51,52],"h3",{"id":52},"在工作流中的工作原理",[14,54,55,56,59,60,63],{},"ControlNet 位于",[18,57,58],{},"正面提示条件","与",[18,61,62],{},"采样器","之间。\n在去噪过程中，它会向模型的 UNet 或 Transformer 模块“注入”结构信息。",[65,66,67,80],"table",{},[68,69,70],"thead",{},[71,72,73,77],"tr",{},[74,75,76],"th",{},"组件",[74,78,79],{},"作用",[81,82,83,94,104],"tbody",{},[71,84,85,91],{},[86,87,88],"td",{},[18,89,90],{},"预处理器（Preprocessor）",[86,92,93],{},"将原图转换成控制图（如边缘检测、深度图）",[71,95,96,101],{},[86,97,98],{},[18,99,100],{},"ControlNet 模型",[86,102,103],{},"负责理解并应用控制图的权重文件",[71,105,106,111],{},[86,107,108],{},[18,109,110],{},"Conditioning",[86,112,113],{},"最终发送给 KSampler 的控制数据",[31,115],{},[34,117,119],{"id":118},"_2-必备-controlnet-类型","2. 必备 ControlNet 类型",[14,121,122,123,29],{},"使用 ControlNet 必须遵守一条原则：\n",[18,124,125],{},"预处理器 必须与 ControlNet 模型匹配",[127,128,129,139,148,157,166],"ol",{},[130,131,132,135,136,29],"li",{},[18,133,134],{},"Canny/Lineart（边缘/线稿）","\n提取轮廓，最适合",[18,137,138],{},"严格保留剪影与结构",[130,140,141,144,145,29],{},[18,142,143],{},"Depth（深度）","\n生成深度图，控制",[18,146,147],{},"3D 空间、透视、前后关系",[130,149,150,153,154,29],{},[18,151,152],{},"SoftEdge/HED（软边缘）","\n更柔和的边缘检测，给 AI ",[18,155,156],{},"更多纹理与风格自由度",[130,158,159,162,163,29],{},[18,160,161],{},"OpenPose（姿态）","\n提取人体骨骼关键点，用于",[18,164,165],{},"精准控制角色姿势",[130,167,168,171,172,175],{},[18,169,170],{},"IP-Adapter","（常与 ControlNet 一起使用）\n用",[18,173,174],{},"参考图","作为视觉提示，而非空间结构控制。",[31,177],{},[34,179,181],{"id":180},"_3-关键参数精通","3. 关键参数精通",[14,183,184,185,189],{},"在 ComfyUI 的 ",[186,187,188],"code",{},"ControlNet Apply"," 节点中，三个滑块决定最终效果：",[191,192,193,207,217],"ul",{},[130,194,195,198,199,202,203,206],{},[18,196,197],{},"Strength（控制强度）","\n控制 ControlNet 的影响力。\n专业技巧：",[186,200,201],{},"1.0"," 是标准值，但 ",[186,204,205],{},"0.6–0.8"," 通常画面更自然。",[130,208,209,212,213,216],{},[18,210,211],{},"Start Percent（开始百分比）","\n控制从第几步开始生效。\n设为 ",[186,214,215],{},"0.0"," 表示从第一步就固定整体结构。",[130,218,219,222,223,226,227,29],{},[18,220,221],{},"End Percent（结束百分比）","\n控制在第几步停止约束。\n设为 ",[186,224,225],{},"0.7"," 可以让 AI 在最后 30% 步骤",[18,228,229],{},"自由补充细节、优化画质",[31,231],{},[34,233,235],{"id":234},"_4-最新模型适配flux-turbo","4. 最新模型适配：Flux & Turbo",[14,237,238,239,242,243,246],{},"现代模型如 ",[18,240,241],{},"Flux.1","、",[18,244,245],{},"Turbo/Lightning"," 与旧版 SD1.5/SDXL 用法完全不同。",[50,248,250],{"id":249},"flux1-controlnet新一代标准","Flux.1 ControlNet（新一代标准）",[14,252,253,254,257],{},"Flux 使用 ",[18,255,256],{},"DiT（Diffusion Transformer）"," 架构。\n对应的 ControlNet（如 X-Labs、InstantX 出品）更大、精度更高。",[191,259,260,266],{},[130,261,262,265],{},[18,263,264],{},"Union ControlNet（一体化模型）","\n一个模型即可支持 Depth、Canny、Blur 等多种控制。",[130,267,268,271,272,275],{},[18,269,270],{},"Guidance Scale","\nFlux 对引导值非常敏感。使用 ControlNet 时建议保持在 ",[186,273,274],{},"3.5–4.0","，避免颜色过饱和、画面僵硬。",[50,277,279],{"id":278},"z-image-turbo-lightning","Z-Image-Turbo & Lightning",[14,281,282,283,286],{},"极速模型（Turbo / LCM / Lightning）只需要 ",[18,284,285],{},"1–8 步","生成。",[191,288,289,295],{},[130,290,291,294],{},[18,292,293],{},"问题","：标准 ControlNet 对 4 步生成来说“太重”。",[130,296,297,300,301,304],{},[18,298,299],{},"解决","：把 ControlNet 强度降到 ",[18,302,303],{},"0.4–0.6","。\n步数太少，高强度会导致画面扁平、对比度过高。",[31,306],{},[34,308,310],{"id":309},"_5-comfyui-高级工作流逻辑","5. ComfyUI 高级工作流逻辑",[14,312,313],{},"专业级 ControlNet 流程建议按以下顺序搭建：",[127,315,316,322,328,334,339],{},[130,317,318,321],{},[18,319,320],{},"Image Load","：加载参考图",[130,323,324,327],{},[18,325,326],{},"Preprocessor Node","：预处理（如 Canny Edge Detector）",[130,329,330,333],{},[18,331,332],{},"ControlNet Loader","：加载对应模型（如 flux-canny-controlnet-v1）",[130,335,336,338],{},[18,337,188],{},"：将正面提示词连接到 Conditioning",[130,340,341,344],{},[18,342,343],{},"KSampler","：将处理后的条件输入采样器",[31,346],{},[34,348,350],{"id":349},"_6-高质量出图总结清单","6. 高质量出图总结清单",[191,352,355,369,381],{"className":353},[354],"contains-task-list",[130,356,359,364,365,368],{"className":357},[358],"task-list-item",[360,361],"input",{"disabled":362,"type":363},true,"checkbox"," ",[18,366,367],{},"分辨率匹配","：ControlNet 图与隐空间图像的比例必须一致",[130,370,372,364,374,377,378],{"className":371},[358],[360,373],{"disabled":362,"type":363},[18,375,376],{},"不要过度约束","：同时使用 3 个及以上 ControlNet 容易导致画面僵硬、细节爆炸，建议只用 ",[18,379,380],{},"1–2 个",[130,382,384,364,386,389],{"className":383},[358],[360,385],{"disabled":362,"type":363},[18,387,388],{},"检查预处理图","：一定要预览预处理器输出。如果边缘图杂乱，最终图像也会杂乱",{"title":391,"searchDepth":392,"depth":392,"links":393},"",2,[394,398,399,400,404,405],{"id":36,"depth":392,"text":37,"children":395},[396],{"id":52,"depth":397,"text":52},3,{"id":118,"depth":392,"text":119},{"id":180,"depth":392,"text":181},{"id":234,"depth":392,"text":235,"children":401},[402,403],{"id":249,"depth":397,"text":250},{"id":278,"depth":397,"text":279},{"id":309,"depth":392,"text":310},{"id":349,"depth":392,"text":350},"ControlNet 是一种神经网络架构，通过添加额外控制条件来约束扩散模型。提示词负责告诉 AI 画什么，而 ControlNet 负责告诉 AI 在哪里画、怎么画。","md",{},"/zh/advanced-tutorial/controlnet-workflow",{"title":5,"description":406},"zh/advanced-tutorial/controlnet-workflow","EWt_xeCRVf9XDIdH5P8fFurxZ5eXDmbJsTNq76SMjrY",1773986044745]