Hi everyone, I could use some help. I'm new to AI image generation; yesterday I installed the software for the first time, using 秋叶's installer package, but when I try to train a LoRA I get the error below. Any advice would be appreciated, thanks!
usage: train_network.py [-h] [--v2] [--v_parameterization]
[--pretrained_model_name_or_path PRETRAINED_MODEL_NAME_OR_PATH]
[--tokenizer_cache_dir TOKENIZER_CACHE_DIR] [--train_data_dir TRAIN_DATA_DIR]
[--shuffle_caption] [--caption_extension CAPTION_EXTENSION]
[--caption_extention CAPTION_EXTENTION] [--keep_tokens KEEP_TOKENS] [--color_aug] [--flip_aug]
[--face_crop_aug_range FACE_CROP_AUG_RANGE] [--random_crop] [--debug_dataset]
[--resolution RESOLUTION] [--cache_latents] [--enable_bucket]
[--min_bucket_reso MIN_BUCKET_RESO] [--max_bucket_reso MAX_BUCKET_RESO]
[--bucket_reso_steps BUCKET_RESO_STEPS] [--bucket_no_upscale]
[--caption_dropout_rate CAPTION_DROPOUT_RATE]
[--caption_dropout_every_n_epochs CAPTION_DROPOUT_EVERY_N_EPOCHS]
[--caption_tag_dropout_rate CAPTION_TAG_DROPOUT_RATE] [--reg_data_dir REG_DATA_DIR]
[--in_json IN_JSON] [--dataset_repeats DATASET_REPEATS] [--output_dir OUTPUT_DIR]
[--output_name OUTPUT_NAME] [--save_precision {None,float,fp16,bf16}]
[--save_every_n_epochs SAVE_EVERY_N_EPOCHS] [--save_n_epoch_ratio SAVE_N_EPOCH_RATIO]
[--save_last_n_epochs SAVE_LAST_N_EPOCHS]
[--save_last_n_epochs_state SAVE_LAST_N_EPOCHS_STATE] [--save_state] [--resume RESUME]
[--train_batch_size TRAIN_BATCH_SIZE] [--max_token_length {None,150,225}] [--mem_eff_attn]
[--xformers] [--vae VAE] [--max_train_steps MAX_TRAIN_STEPS]
[--max_train_epochs MAX_TRAIN_EPOCHS] [--max_data_loader_n_workers MAX_DATA_LOADER_N_WORKERS]
[--persistent_data_loader_workers] [--seed SEED] [--gradient_checkpointing]
[--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS] [--mixed_precision {no,fp16,bf16}]
[--full_fp16] [--clip_skip CLIP_SKIP] [--logging_dir LOGGING_DIR] [--log_prefix LOG_PREFIX]
[--noise_offset NOISE_OFFSET] [--lowram] [--sample_every_n_steps SAMPLE_EVERY_N_STEPS]
[--sample_every_n_epochs SAMPLE_EVERY_N_EPOCHS] [--sample_prompts SAMPLE_PROMPTS]
[--sample_sampler {ddim,pndm,lms,euler,euler_a,heun,dpm_2,dpm_2_a,dpmsolver,dpmsolver++,dpmsingle,k_lms,k_euler,k_euler_a,k_dpm_2,k_dpm_2_a}]
[--prior_loss_weight PRIOR_LOSS_WEIGHT] [--optimizer_type OPTIMIZER_TYPE] [--use_8bit_adam]
[--use_lion_optimizer] [--learning_rate LEARNING_RATE] [--max_grad_norm MAX_GRAD_NORM]
[--optimizer_args [OPTIMIZER_ARGS ...]] [--lr_scheduler LR_SCHEDULER]
[--lr_warmup_steps LR_WARMUP_STEPS] [--lr_scheduler_num_cycles LR_SCHEDULER_NUM_CYCLES]
[--lr_scheduler_power LR_SCHEDULER_POWER] [--dataset_config DATASET_CONFIG] [--no_metadata]
[--save_model_as {None,ckpt,pt,safetensors}] [--unet_lr UNET_LR]
[--text_encoder_lr TEXT_ENCODER_LR] [--network_weights NETWORK_WEIGHTS]
[--network_module NETWORK_MODULE] [--network_dim NETWORK_DIM] [--network_alpha NETWORK_ALPHA]
[--network_args [NETWORK_ARGS ...]] [--network_train_unet_only]
[--network_train_text_encoder_only] [--training_comment TRAINING_COMMENT]
train_network.py: error: unrecognized arguments: --log_with=tensorboard
Traceback (most recent call last):
File "C:\Users\hxy23\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\hxy23\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "D:\BaiduNetdiskDownload\lora\lora-scripts\venv\lib\site-packages\accelerate\commands\launch.py", line 1114, in <module>
main()
File "D:\BaiduNetdiskDownload\lora\lora-scripts\venv\lib\site-packages\accelerate\commands\launch.py", line 1110, in main
launch_command(args)
File "D:\BaiduNetdiskDownload\lora\lora-scripts\venv\lib\site-packages\accelerate\commands\launch.py", line 1104, in launch_command
simple_launcher(args)
File "D:\BaiduNetdiskDownload\lora\lora-scripts\venv\lib\site-packages\accelerate\commands\launch.py", line 567, in simple_launcher
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
subprocess.CalledProcessError: Command '['D:\\BaiduNetdiskDownload\\lora\\lora-scripts\\venv\\Scripts\\python.exe', './sd-scripts/train_network.py', '--enable_bucket', '--pretrained_model_name_or_path=./sd-models/model.ckpt', '--train_data_dir=./train/atora', '--output_dir=./output', '--logging_dir=./logs', '--log_prefix=satora', '--resolution=512,512', '--network_module=networks.lora', '--max_train_epochs=10', '--learning_rate=1e-4', '--unet_lr=1e-4', '--text_encoder_lr=1e-5', '--lr_scheduler=cosine_with_restarts', '--lr_warmup_steps=0', '--lr_scheduler_num_cycles=1', '--network_dim=32', '--network_alpha=32', '--output_name=satora', '--train_batch_size=1', '--save_every_n_epochs=2', '--mixed_precision=fp16', '--save_precision=fp16', '--seed=1337', '--cache_latents', '--prior_loss_weight=1', '--max_token_length=225', '--caption_extension=.txt', '--save_model_as=safetensors', '--min_bucket_reso=256', '--max_bucket_reso=1024', '--keep_tokens=0', '--xformers', '--shuffle_caption', '--clip_skip=2', '--optimizer_type=AdamW8bit', '--persistent_data_loader_workers', '--log_with=tensorboard']' returned non-zero exit status 2.
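To make it easier to read, here is the same launch command from the CalledProcessError above, reflowed into a multi-line PowerShell-style invocation. Nothing is changed from the log; only the line breaks and the comment marking the rejected flag are mine:

D:\BaiduNetdiskDownload\lora\lora-scripts\venv\Scripts\python.exe ./sd-scripts/train_network.py `
  --enable_bucket --pretrained_model_name_or_path=./sd-models/model.ckpt `
  --train_data_dir=./train/atora --output_dir=./output --logging_dir=./logs --log_prefix=satora `
  --resolution=512,512 --network_module=networks.lora --max_train_epochs=10 `
  --learning_rate=1e-4 --unet_lr=1e-4 --text_encoder_lr=1e-5 `
  --lr_scheduler=cosine_with_restarts --lr_warmup_steps=0 --lr_scheduler_num_cycles=1 `
  --network_dim=32 --network_alpha=32 --output_name=satora --train_batch_size=1 `
  --save_every_n_epochs=2 --mixed_precision=fp16 --save_precision=fp16 --seed=1337 `
  --cache_latents --prior_loss_weight=1 --max_token_length=225 --caption_extension=.txt `
  --save_model_as=safetensors --min_bucket_reso=256 --max_bucket_reso=1024 --keep_tokens=0 `
  --xformers --shuffle_caption --clip_skip=2 --optimizer_type=AdamW8bit `
  --persistent_data_loader_workers `
  --log_with=tensorboard   # <- the argument train_network.py rejects; it does not appear in the usage text above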
Train finished