{
  "nodes": [
    {
      "id": 1,
      "type": "CheckpointLoaderSimple",
      "params": {
        "ckpt_name": "your_base_model.safetensors"
      }
    },
    {
      "id": 2,
      "type": "LoraLoader",
      "params": {
        "lora_name": "Raphael_LoRA-000010.safetensors",
        "strength_model": 1.0,
        "strength_clip": 1.0
      }
    },
    {
      "id": 3,
      "type": "CLIPTextEncode",
      "params": {
        "text": "photo of raphael, dreamy lighting, selfie, close-up, soft focus, masterpiece, high detail"
      }
    },
    {
      "id": 4,
      "type": "CLIPTextEncode",
      "params": {
        "text": "blurry, low quality, bad anatomy, distorted face, extra limbs, watermark"
      }
    },
    {
      "id": 5,
      "type": "EmptyLatentImage",
      "params": {
        "width": 512,
        "height": 768,
        "batch_size": 1
      }
    },
    {
      "id": 6,
      "type": "KSampler",
      "params": {
        "steps": 25,
        "cfg": 7.0,
        "sampler_name": "euler_ancestral",
        "scheduler": "normal",
        "seed": 12345678
      }
    },
    {
      "id": 7,
      "type": "VAEDecode",
      "params": {}
    },
    {
      "id": 8,
      "type": "SaveImage",
      "params": {
        "filename_prefix": "raphael_output"
      }
    }
  ],
"edges": [
[1, "MODEL", 2, "model"],
[1, "CLIP", 2, "clip"],
[2, "model", 6, "model"],
[2, "clip", 3, "clip"],
[2, "clip", 4, "clip"],
[3, "encoded", 6, "positive"],
[4, "encoded", 6, "negative"],
[5, "LATENT", 6, "latent_image"],
[6, "LATENT", 7, "LATENT"],
[7, "IMAGE", 8, "IMAGE"]
]
}
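
The listing above is a simplified nodes/edges description, not the prompt format that ComfyUI's HTTP API actually accepts (a dict keyed by node id, where each linked input is written as a `[source_node_id, output_index]` pair). The sketch below shows one way to bridge the two: it converts the listing and queues it on a local server. Treat it as a minimal sketch under a few assumptions — a ComfyUI instance listening on `127.0.0.1:8188`, the listing saved as a hypothetical `workflow.json`, the `OUTPUT_INDEX` table taken from the output order of the standard built-in nodes, and a default `denoise` of 1.0 filled in for the KSampler since the listing omits it.

```python
import json
import urllib.request

# Output-slot order of the built-in nodes used above (assumed from the
# standard ComfyUI node definitions). The API format references a source
# node by id plus the *index* of its output, not its name.
OUTPUT_INDEX = {
    ("CheckpointLoaderSimple", "MODEL"): 0,
    ("CheckpointLoaderSimple", "CLIP"): 1,
    ("CheckpointLoaderSimple", "VAE"): 2,
    ("LoraLoader", "MODEL"): 0,
    ("LoraLoader", "CLIP"): 1,
    ("CLIPTextEncode", "CONDITIONING"): 0,
    ("EmptyLatentImage", "LATENT"): 0,
    ("KSampler", "LATENT"): 0,
    ("VAEDecode", "IMAGE"): 0,
}

def to_api_prompt(workflow: dict) -> dict:
    """Convert the simplified nodes/edges listing into ComfyUI's API prompt dict."""
    prompt = {}
    for node in workflow["nodes"]:
        inputs = dict(node["params"])
        if node["type"] == "KSampler":
            # The listing omits KSampler's "denoise" widget; 1.0 (full denoise,
            # the plain txt2img setting) is assumed here.
            inputs.setdefault("denoise", 1.0)
        prompt[str(node["id"])] = {"class_type": node["type"], "inputs": inputs}
    for src_id, src_out, dst_id, dst_in in workflow["edges"]:
        src_type = prompt[str(src_id)]["class_type"]
        # A linked input is encoded as [source_node_id, output_index].
        prompt[str(dst_id)]["inputs"][dst_in] = [str(src_id), OUTPUT_INDEX[(src_type, src_out)]]
    return prompt

if __name__ == "__main__":
    # "workflow.json" is a hypothetical file holding the listing above.
    with open("workflow.json", "r", encoding="utf-8") as f:
        workflow = json.load(f)

    payload = json.dumps({"prompt": to_api_prompt(workflow)}).encode("utf-8")
    req = urllib.request.Request(
        "http://127.0.0.1:8188/prompt",  # default local ComfyUI address
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode())  # response includes the queued prompt_id
```

If the POST succeeds, the server queues the graph and the SaveImage node should write the result under the `raphael_output` prefix in ComfyUI's output directory; if the response reports node errors instead, the usual culprits are wiring mistakes such as a missing VAE connection to the VAEDecode node.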