more features, easy config, fixed issue where it just ignores any tag with spaces

master
mrq 2022-10-06 00:16:32 +07:00
parent fa82334f4e
commit d8f048c6f1
3 changed files with 229 additions and 36 deletions

.gitignore (vendored, 142 additions)

@@ -0,0 +1,142 @@
# ---> Node
# data/config/master.*.json
utils/renamer/in/*.jpg
utils/renamer/in/*.png
utils/renamer/out/*.png
utils/renamer/out/*.jpg
utils/renamer/cache.json
package-lock.json
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

@@ -23,9 +23,11 @@ What works for you will differ from what works for me, but do not be discouraged
## Acquiring Source Material
The first step of training against a subject (or art style) is to acquire source content. Hugging Face's instructions specify having three to five images, cropped to 512x512, but there's no hard upper limit on how many, nor does having more images have any bearing on the final output size or performance. However, the more images you use, the longer it will take to converge (even though, in typical neural network training, convergence implies overfitting).
I cannot imagine a scenario where you should stick with a low image count, such as selecting from a pool and pruning for the "best of the best". If you can get lots of images, do it. While the test outputs during training may look better with a smaller pool, when it comes to real image generation, embeddings from big image pools (140-190) yielded far better results than later embeddings trained on pools half that size (50-100).
If you're lacking material, the web UI's pre-processing tools to flip and split should work decently enough to cover the gap. Flipping will duplicate images mirrored across the Y axis, (presumably) adding more symmetry to the final embedding, while splitting will help deal with non-square content and provide good coverage for partially generating your subject (for example, bust shots, below-the-waist shots, chest only, etc.).
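If you want to replicate the flip step yourself outside the web UI, here's a minimal sketch using the `sharp` npm package (the package choice and paths are my assumptions, not what the web UI actually uses):
```
let FS = require("fs")
let sharp = require("sharp")

// write a mirrored copy of every image next to the original,
// doubling the pool the same way the web UI's flip option does
for ( let file of FS.readdirSync(`./in/`) ) {
	sharp(`./in/${file}`)
		.flop() // mirror across the Y (vertical) axis
		.toFile(`./in/flipped_${file}`)
		.catch(console.error)
}
```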
If you would rather have finely-crafted material, you're more than welcome to manually crop and square images. A compromise for cropping an image is to expand the canvas size to square it off, then fill the new empty space with colors that crudely blend with the background, and crudely add color blobs to extend any limbs cut off by the frame. It's not imperative to do so, but it helps.
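If you'd rather script that compromise than eyeball it in an image editor, a sketch of the pad-to-square half of the idea, again assuming `sharp`, with a hypothetical fill colour and paths (the 512x512 target is from above):
```
let sharp = require("sharp")

// expand the canvas to 512x512, padding the short side with a flat colour
// instead of cropping anything away
sharp(`./in/subject.png`)
	.resize(512, 512, {
		fit: "contain", // keep the whole image, pad to fit
		background: { r: 120, g: 110, b: 100 } // pick something close to the image's background
	})
	.toFile(`./out/subject.png`)
	.catch(console.error)
```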
@@ -76,7 +78,7 @@ An adequate starting point is simply:
```
uploaded on e621, [name], [filewords]
```
I've had decent results with just that for training subjects. I've had mixed results with expanding that by filling in more artists to train against, for example:
```
uploaded on e621, [name] by motogen, [filewords]
uploaded on e621, [name] by oaks16, [filewords]
```
@@ -122,7 +124,7 @@ Next:
* pass in the path to the folder of your source material to train against
* put in the path to the prompt file you created earlier. If you put it in the same folder as the web UI's default prompts, just swap in your file's name there
* adjust how many steps you want training to run before terminating. Paperspace seems to let me do ~70000 steps on an A6000 before shutting down after 6 hours, while an 80GB A100 will let me get just shy of the full 100000 before auto-shutting down after 6 hours.
* the last two values are creature comforts and have no real effect on training, values are up to player preference.
Afterwards, hit Train, and wait and watch your creation come to life.

@@ -1,24 +1,50 @@
let FS = require("fs")
let Fetch = require("node-fetch")
let config = {
input: `./in/`, // folder of images to process
output: `./out/`, // folder to copy renamed files into
tags: `./tags.csv`, // CSV of tags associated with the yiffy model (swap in another booru's taglist for whatever model you're training against)
cache: `./cache.json`, // JSON file of cached tags; speeds up processing when re-running
rateLimit: 500, // time to wait between requests, in milliseconds, e621 imposes a rate limit of 2 requests per second
filenameLimit: 192, // maximum characters to put in the filename, necessary to abide by filesystem limitations, and to "limit" token count for the prompt parser
filter: true,
// fill this with whatever tags you don't want making it into the filename
// very common tags like "anthro", "male", and "female" are included below as a starting point
filters: [
"anthro",
"fur",
"male",
"female",
"genitals",
"video games",
"animal genitalia",
/clothing$/,
],
onlyIncludeModelArtists: true, // if true, only include the artist tag when it's in the model's taglist; if false, add all artists
// i've noticed some artists that weren't included in the taglist but are available in LAION's (vanilla SD)
reverseTags: false, // inverts sorting, prioritizing tags with little representation in the model
}
let files = FS.readdirSync(config.input);
let csv = FS.readFileSync(config.tags)
csv = csv.toString().split("\n")
config.tags = {}
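// note: config.tags held the CSV path above; from here on it's reused as the tag -> count map
// each line of tags.csv is expected to be a "tag,count" pair, e.g. (counts here are hypothetical):
//   anthro,2000000
//   by some_artist,1500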
for ( let i in csv ) {
let [k, v] = csv[i].split(",")
config.tags[k] = parseInt(v);
}
let cache;
try {
cache = JSON.parse( FS.readFileSync(config.cache) )
} catch ( e ) {
cache = {};
}
let parse = async () => {
for ( let i in files ) {
@@ -27,15 +53,21 @@ let parse = async () => {
if ( !md5 ) continue;
md5 = md5[1];
let ext = file.split(".").pop()
console.log(`[${100.0 * i / files.length}%]: ${md5}`);
let rateLimit = false;
if ( !cache[md5] ) {
rateLimit = true;
let r = await Fetch( `https://e621.net/posts.json?tags=md5:${md5}`, {
headers: {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
} );
let j = JSON.parse(await r.text());
cache[md5] = j.posts[0];
}
let json = cache[md5];
if ( !json ) continue;
let tags = [];
@@ -50,40 +82,57 @@ let parse = async () => {
for ( let cat in json.tags ) {
if ( cat === "artist" ) {
let tag = "by " + json.tags["artist"].join(" and ")
if ( config.onlyIncludeModelArtists && !config.tags[tag] ) continue;
artist = tag;
} else for ( let k in json.tags[cat] ) {
let tag = json.tags[cat][k].replace(/_/g, " ");
if ( !config.tags[tag] ) continue;
if ( tag.indexOf("/") >= 0 ) continue; // illegal filename character
if ( config.filter ) {
let should = false;
for ( let i in config.filters ) {
let filter = config.filters[i];
if ( filter === tag || ( filter instanceof RegExp && tag.match(filter) ) ) {
should = true;
break;
}
}
if ( should ) continue;
}
tags.push(tag);
}
}
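// sort tags by how well-represented they are in the model's taglist, most common first (reverseTags flips this)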
tags = tags.sort( (a, b) => {
let polarity = config.reverseTags ? -1 : 1;
return (config.tags[b] - config.tags[a]) * polarity;
})
if ( artist ) tags.unshift(artist);
if ( content ) tags.unshift(content);
console.log(tags)
let jointmp = "";
let filtered = [];
for ( let i in tags ) {
if ( (jointmp + " " + tags[i]).length > config.filenameLimit ) break;
jointmp += " " + tags[i];
filtered.push(tags[i])
}
let joined = filtered.join(" ")
require("fs").copyFileSync(`./in/${file}`, `./out/${joined}.${ext}`)
// NOOOOOO YOU'RE SUPPOSED TO DO IT ASYNCHRONOUSLY SO IT'S NOT BLOCKING
require("fs").copyFileSync(`${config.input}/${file}`, `${config.output}/${joined}.${ext}`)
// rate limit
if ( rateLimit && config.rateLimit ) await new Promise( (resolve) => {
setTimeout(resolve, config.rateLimit)
} )
}
// NOOOOOOOO YOU'RE WASTING SPACE BY PRETTIFYING
FS.writeFileSync(config.cache, JSON.stringify( cache, null, "\t" ))
}
parse();
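For reference, a hypothetical way to run this: put your e621 saves (named by their md5, which the script extracts from the filename) in `./in/`, install the dependency with `npm install node-fetch@2` (v3 is ESM-only and would break the `require()` at the top), then:
```
node renamer.js
```
The filename `renamer.js` is an assumption; use whatever the script is saved as in `utils/renamer/`.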