diff --git a/invokeai/app/invocations/flux_control_lora_loader.py b/invokeai/app/invocations/flux_control_lora_loader.py
index f1519deb9e1..c61f1d32632 100644
--- a/invokeai/app/invocations/flux_control_lora_loader.py
+++ b/invokeai/app/invocations/flux_control_lora_loader.py
@@ -24,7 +24,7 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):
title="Flux Control LoRA",
tags=["lora", "model", "flux"],
category="model",
- version="1.0.0",
+ version="1.1.0",
classification=Classification.Prototype,
)
class FluxControlLoRALoaderInvocation(BaseInvocation):
@@ -34,6 +34,7 @@ class FluxControlLoRALoaderInvocation(BaseInvocation):
description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
)
image: ImageField = InputField(description="The image to encode.")
+ weight: float = InputField(description="The weight of the LoRA.", default=1.0)
def invoke(self, context: InvocationContext) -> FluxControlLoRALoaderOutput:
if not context.models.exists(self.lora.key):
@@ -43,6 +44,6 @@ def invoke(self, context: InvocationContext) -> FluxControlLoRALoaderOutput:
control_lora=ControlLoRAField(
lora=self.lora,
img=self.image,
- weight=1,
+ weight=self.weight,
)
)
diff --git a/invokeai/backend/patches/layers/set_parameter_layer.py b/invokeai/backend/patches/layers/set_parameter_layer.py
index 2d5920429f9..f0ae461f4d3 100644
--- a/invokeai/backend/patches/layers/set_parameter_layer.py
+++ b/invokeai/backend/patches/layers/set_parameter_layer.py
@@ -15,8 +15,10 @@ def __init__(self, param_name: str, weight: torch.Tensor):
self.param_name = param_name
def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
+ # Note: We intentionally ignore the weight parameter here. This matches the behavior in the official FLUX
+ # Control LoRA implementation.
diff = self.weight - orig_module.get_parameter(self.param_name)
- return {self.param_name: diff * weight}
+ return {self.param_name: diff}
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
self.weight = self.weight.to(device=device, dtype=dtype)
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayer/ControlLayerControlAdapter.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayer/ControlLayerControlAdapter.tsx
index 1358a65e898..4880081de6f 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/ControlLayer/ControlLayerControlAdapter.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/ControlLayer/ControlLayerControlAdapter.tsx
@@ -162,7 +162,7 @@ export const ControlLayerControlAdapter = memo(() => {
/>
- {controlAdapter.type !== 'control_lora' && }
+
{controlAdapter.type !== 'control_lora' && (
)}
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
index 7f79c391a68..fc3fd92abca 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts
@@ -74,6 +74,7 @@ import {
getReferenceImageState,
getRegionalGuidanceState,
imageDTOToImageWithDims,
+ initialControlLoRA,
initialControlNet,
initialIPAdapter,
initialT2IAdapter,
@@ -462,38 +463,64 @@ export const canvasSlice = createSlice({
}
layer.controlAdapter.model = zModelIdentifierField.parse(modelConfig);
+ // When converting between control layer types, we may need to add or remove properties. For example, ControlNet
+ // has a control mode, while T2I Adapter does not - otherwise they are the same.
+
switch (layer.controlAdapter.model.type) {
+ // Converting to T2I adapter from...
case 't2i_adapter': {
if (layer.controlAdapter.type === 'controlnet') {
+ // T2I Adapters have all the ControlNet properties, minus control mode - strip it
const { controlMode: _, ...rest } = layer.controlAdapter;
- const t2iAdapterConfig: T2IAdapterConfig = { ...rest, type: 't2i_adapter' };
+ const t2iAdapterConfig: T2IAdapterConfig = { ...initialT2IAdapter, ...rest, type: 't2i_adapter' };
layer.controlAdapter = t2iAdapterConfig;
} else if (layer.controlAdapter.type === 'control_lora') {
- const t2iAdapterConfig: T2IAdapterConfig = { ...layer.controlAdapter, ...initialT2IAdapter };
+ // Control LoRAs have only model and weight
+ const t2iAdapterConfig: T2IAdapterConfig = {
+ ...initialT2IAdapter,
+ ...layer.controlAdapter,
+ type: 't2i_adapter',
+ };
layer.controlAdapter = t2iAdapterConfig;
}
break;
}
+ // Converting to ControlNet from...
case 'controlnet': {
if (layer.controlAdapter.type === 't2i_adapter') {
+ // ControlNets have all the T2I Adapter properties, plus control mode
const controlNetConfig: ControlNetConfig = {
+ ...initialControlNet,
...layer.controlAdapter,
type: 'controlnet',
- controlMode: initialControlNet.controlMode,
};
layer.controlAdapter = controlNetConfig;
} else if (layer.controlAdapter.type === 'control_lora') {
- const controlNetConfig: ControlNetConfig = { ...layer.controlAdapter, ...initialControlNet };
+ // ControlNets have all the Control LoRA properties, plus control mode and begin/end step pct
+ const controlNetConfig: ControlNetConfig = {
+ ...initialControlNet,
+ ...layer.controlAdapter,
+ type: 'controlnet',
+ };
layer.controlAdapter = controlNetConfig;
}
break;
}
+ // Converting to ControlLoRA from...
case 'control_lora': {
- const controlLoraConfig: ControlLoRAConfig = { ...layer.controlAdapter, type: 'control_lora' };
- layer.controlAdapter = controlLoraConfig;
-
+ if (layer.controlAdapter.type === 'controlnet') {
+ // We only need the model and weight for Control LoRA
+ const { model, weight } = layer.controlAdapter;
+      const controlLoRAConfig: ControlLoRAConfig = { ...initialControlLoRA, model, weight };
+      layer.controlAdapter = controlLoRAConfig;
+ } else if (layer.controlAdapter.type === 't2i_adapter') {
+ // We only need the model and weight for Control LoRA
+ const { model, weight } = layer.controlAdapter;
+      const controlLoRAConfig: ControlLoRAConfig = { ...initialControlLoRA, model, weight };
+      layer.controlAdapter = controlLoRAConfig;
+ }
break;
}
@@ -518,7 +545,7 @@ export const canvasSlice = createSlice({
) => {
const { entityIdentifier, weight } = action.payload;
const layer = selectEntity(state, entityIdentifier);
- if (!layer || !layer.controlAdapter || layer.controlAdapter.type === 'control_lora') {
+ if (!layer || !layer.controlAdapter) {
return;
}
layer.controlAdapter.weight = weight;
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
index a04637b3af8..d7eea9bb174 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
@@ -298,6 +298,7 @@ export type T2IAdapterConfig = z.infer<typeof zT2IAdapterConfig>;
const zControlLoRAConfig = z.object({
type: z.literal('control_lora'),
+ weight: z.number().gte(-1).lte(2),
model: zServerValidatedModelIdentifierField.nullable(),
});
export type ControlLoRAConfig = z.infer<typeof zControlLoRAConfig>;
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/util.ts b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
index e8b8b389383..d12fe837b54 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/util.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
@@ -7,6 +7,7 @@ import type {
CanvasRasterLayerState,
CanvasReferenceImageState,
CanvasRegionalGuidanceState,
+ ControlLoRAConfig,
ControlNetConfig,
ImageWithDims,
IPAdapterConfig,
@@ -82,6 +83,11 @@ export const initialControlNet: ControlNetConfig = {
beginEndStepPct: [0, 0.75],
controlMode: 'balanced',
};
+export const initialControlLoRA: ControlLoRAConfig = {
+ type: 'control_lora',
+ model: null,
+ weight: 0.75,
+};
export const getReferenceImageState = (
id: string,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts
index bbf920a8e36..f340cd70a72 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts
@@ -207,7 +207,7 @@ const addControlLoRAToGraph = (
) => {
const { id, controlAdapter } = layer;
assert(controlAdapter.type === 'control_lora');
- const { model } = controlAdapter;
+ const { model, weight } = controlAdapter;
assert(model !== null);
const { image_name } = imageDTO;
@@ -216,6 +216,7 @@ const addControlLoRAToGraph = (
type: 'flux_control_lora_loader',
lora: model,
image: { image_name },
+ weight: weight,
});
g.addEdge(controlLoRA, 'control_lora', denoise, 'control_lora');
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 37e5db14e6e..8741b55a691 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -6708,6 +6708,12 @@ export type components = {
* @default null
*/
image?: components["schemas"]["ImageField"];
+ /**
+ * Weight
+ * @description The weight of the LoRA.
+ * @default 1
+ */
+ weight?: number;
/**
* type
* @default flux_control_lora_loader
@@ -6722,11 +6728,11 @@ export type components = {
*/
FluxControlLoRALoaderOutput: {
/**
- * Flux Control Lora
+ * Flux Control LoRA
* @description Control LoRAs to apply on model loading
* @default null
*/
- control_lora: components["schemas"]["ControlLoRAField"] | null;
+ control_lora: components["schemas"]["ControlLoRAField"];
/**
* type
* @default flux_control_lora_loader_output
@@ -6926,7 +6932,7 @@ export type components = {
*/
transformer?: components["schemas"]["TransformerField"];
/**
- * Control Lora
+ * Control LoRA
* @description Control LoRA model to load
* @default null
*/