| { | |
| "cells": [ | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": { | |
| "colab": { | |
| "base_uri": "https://localhost:8080/" | |
| }, | |
| "id": "WSoOPmCCVvwd", | |
| "outputId": "c10fe43d-b616-4d40-ace9-cd3426a72aba" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Mount Google-Drive, needs to be approved manually\n", | |
| "from google.colab import drive\n", | |
| "drive.mount('/content/gdrive')" | |
| ] | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "One weird thing w/ this colab:\n", | |
| "\n", | |
| "you must select a valid input image, even if it's out of the directory you're actually loading in and using- something to do with the structure of the img2img script on the compvis repo. I put this together as quick as I could to get it working so it's probably very easy to fix lol" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "#colab variables\n", | |
| "model_ckpt = '/content/gdrive/My Drive/Colab Notebooks/models/model.ckpt' #@param {type:\"string\"}\n", | |
| "prompt = \"the delirious fruit god bust, colored pencil drawing\" #@param {type:\"string\"}\n", | |
| "outdir='/content/gdrive/MyDrive/SD/OUT/IMG_SEQ_021' #@param {type:\"string\"}\n", | |
| "ddim_steps = 75 #@param {type:\"number\"}\n", | |
| "ddim_eta = 0 #@param {type:\"number\"}\n", | |
| "n_iter = 2600 #@param {type:\"number\"}\n", | |
| "W=512 #@param {type:\"number\"}\n", | |
| "H=512 #@param {type:\"number\"}\n", | |
| "n_samples=1 #@param {type:\"number\"}\n", | |
| "scale=5.0 #@param {type:\"number\"}\n", | |
| "init_img_dir='/content/gdrive/MyDrive/INIT_IMG_DIR' #@param {type:\"string\"}\n", | |
| "plms=False #@param {type:\"boolean\"}\n", | |
| "n_rows=1 #@param {type:\"number\"}\n", | |
| "from_file=None #@param {type:\"string\"}\n", | |
| "strength=.52 #@param {type:\"number\"}\n", | |
| "precision=\"autocast\" #@param {type:\"string\"}\n", | |
| "skip_save=False #@param {type:\"boolean\"}\n", | |
| "skip_grid=True\n", | |
| "seed=2600\n" | |
| ] | |
| }, | |
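| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "Optional sanity check (an added sketch, not part of the original workflow): it confirms the paths set above exist, counts the frames the loop will look for, and prints the number of img2img steps that `run()` will derive from `strength` (`t_enc = int(strength * ddim_steps)`). The `*.png` pattern and the use of `init_img` assume the naming and variables from the cell above." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Sanity-check the settings (optional)\n", | |
| "import os, glob\n", | |
| "assert os.path.isfile(init_img), f\"init_img not found: {init_img}\"\n", | |
| "frames = sorted(glob.glob(os.path.join(init_img_dir, '*.png')))\n", | |
| "print(f\"{len(frames)} png frames found in {init_img_dir}\")\n", | |
| "print(f\"t_enc will be int({strength} * {ddim_steps}) = {int(strength * ddim_steps)} of {ddim_steps} DDIM steps\")" | |
| ] | |
| }, | |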
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": { | |
| "colab": { | |
| "base_uri": "https://localhost:8080/", | |
| "height": 1000 | |
| }, | |
| "id": "NHgUAp48qwoG", | |
| "outputId": "042a9be5-56bb-497b-890a-bc8ab45bf7b9" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Installation\n", | |
| "%cd /content/\n", | |
| "!pip install clip-by-openai\n", | |
| "!git clone https://github.com/CompVis/latent-diffusion.git\n", | |
| "!git clone https://github.com/CompVis/taming-transformers\n", | |
| "!git clone https://github.com/CompVis/stable-diffusion.git\n", | |
| "!pip install -e ./taming-transformers\n", | |
| "!pip install omegaconf>=2.0.0 pytorch-lightning>=1.0.8 torch-fidelity einops\n", | |
| "!pip install transformers\n", | |
| "!pip install kornia\n", | |
| "import sys\n", | |
| "sys.path.append(\".\")\n", | |
| "sys.path.append('./taming-transformers')\n", | |
| "from taming.models import vqgan \n", | |
| "!cp /content/stable-diffusion/ldm/modules/encoders/modules.py /content/latent-diffusion/ldm/modules/encoders/modules.py\n", | |
| "!nvidia-smi" | |
| ] | |
| }, | |
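| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "Optional: a quick check that the installation above left things in a usable state. This is an added convenience cell; the config path simply mirrors the one hard-coded in the cells below." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Verify installation (optional)\n", | |
| "import os\n", | |
| "import torch\n", | |
| "cfg = '/content/stable-diffusion/configs/stable-diffusion/v1-inference.yaml'\n", | |
| "print('config present:', os.path.isfile(cfg))\n", | |
| "print('CUDA available:', torch.cuda.is_available())" | |
| ] | |
| }, | |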
| { | |
| "cell_type": "code", | |
| "execution_count": 1, | |
| "metadata": { | |
| "colab": { | |
| "base_uri": "https://localhost:8080/" | |
| }, | |
| "id": "fnGwQRhtyBhb", | |
| "outputId": "e3299938-b138-4e20-c522-497a23a76c5a" | |
| }, | |
| "outputs": [ | |
| { | |
| "name": "stdout", | |
| "output_type": "stream", | |
| "text": [ | |
| "/content/stable-diffusion\n" | |
| ] | |
| } | |
| ], | |
| "source": [ | |
| "#@title loading utils\n", | |
| "%cd /content/stable-diffusion/\n", | |
| "import torch\n", | |
| "from omegaconf import OmegaConf\n", | |
| "\n", | |
| "from ldm.util import instantiate_from_config\n", | |
| "\n", | |
| "\n", | |
| "def load_model_from_config(config, ckpt):\n", | |
| " print(f\"Loading model from {ckpt}\")\n", | |
| " pl_sd = torch.load(ckpt)#, map_location=\"cpu\")\n", | |
| " sd = pl_sd[\"state_dict\"]\n", | |
| " model = instantiate_from_config(config.model)\n", | |
| " m, u = model.load_state_dict(sd, strict=False)\n", | |
| " model.cuda()\n", | |
| " model.eval()\n", | |
| " return model\n", | |
| "\n", | |
| "\n", | |
| "def get_model():\n", | |
| " config = OmegaConf.load(\"/content/stable-diffusion/configs/stable-diffusion/v1-inference.yaml\") \n", | |
| " model = load_model_from_config(config, f\"{model_ckpt}\")\n", | |
| " return model" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 2, | |
| "metadata": { | |
| "id": "BPnyd-XUKbfE" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Import stuff\n", | |
| "import argparse, os, sys, glob\n", | |
| "import torch\n", | |
| "import numpy as np\n", | |
| "import PIL\n", | |
| "from omegaconf import OmegaConf\n", | |
| "from PIL import Image\n", | |
| "from tqdm.auto import tqdm, trange\n", | |
| "from einops import rearrange\n", | |
| "from torchvision.utils import make_grid\n", | |
| "import transformers\n", | |
| "import gc\n", | |
| "from ldm.util import instantiate_from_config\n", | |
| "from ldm.models.diffusion.ddim import DDIMSampler\n", | |
| "from tqdm import tqdm, trange\n", | |
| "from itertools import islice\n", | |
| "from einops import rearrange, repeat\n", | |
| "from torch import autocast\n", | |
| "from contextlib import nullcontext\n", | |
| "import time\n", | |
| "from pytorch_lightning import seed_everything\n", | |
| "from ldm.models.diffusion.plms import PLMSSampler\n", | |
| "from google.colab.patches import cv2_imshow\n" | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": 14, | |
| "metadata": { | |
| "id": "Z6OadItmKyCd" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "\n", | |
| "INPUT_SIZE = (W,H)\n", | |
| "\n", | |
| "\n", | |
| "def chunk(it, size):\n", | |
| " it = iter(it)\n", | |
| " return iter(lambda: tuple(islice(it, size)), ())\n", | |
| "\n", | |
| "\n", | |
| "def load_model_from_config(config, ckpt, verbose=False):\n", | |
| " print(f\"Loading model from {ckpt}\")\n", | |
| " pl_sd = torch.load(ckpt, map_location=\"cpu\")\n", | |
| " if \"global_step\" in pl_sd:\n", | |
| " print(f\"Global Step: {pl_sd['global_step']}\")\n", | |
| " sd = pl_sd[\"state_dict\"]\n", | |
| " model = instantiate_from_config(config.model)\n", | |
| " m, u = model.load_state_dict(sd, strict=False)\n", | |
| " if len(m) > 0 and verbose:\n", | |
| " print(\"missing keys:\")\n", | |
| " print(m)\n", | |
| " if len(u) > 0 and verbose:\n", | |
| " print(\"unexpected keys:\")\n", | |
| " print(u)\n", | |
| "\n", | |
| " model.cuda()\n", | |
| " model.eval()\n", | |
| " return model\n", | |
| "\n", | |
| "\n", | |
| "def load_img(path):\n", | |
| " image = Image.open(path).convert(\"RGB\")\n", | |
| " image = image.resize((W,H), resample=PIL.Image.LANCZOS)\n", | |
| " w, h = image.size\n", | |
| " print(f\"loaded input image of size ({w}, {h}) from {path}\")\n", | |
| " w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32\n", | |
| " # image = image.resize((w, h), resample=PIL.Image.LANCZOS)\n", | |
| " image = np.array(image).astype(np.float32) / 255.0\n", | |
| " image = image[None].transpose(0, 3, 1, 2)\n", | |
| " image = torch.from_numpy(image)\n", | |
| " return 2.*image - 1.\n", | |
| "\n", | |
| "def run(opt):\n", | |
| " torch.cuda.empty_cache()\n", | |
| " gc.collect()\n", | |
| " prompt = opt.prompt,\n", | |
| " outdir=opt.outdir,\n", | |
| " ddim_steps = opt.ddim_steps,\n", | |
| " ddim_eta = opt.ddim_eta,\n", | |
| " n_iter = opt.n_iter,\n", | |
| " W=opt.W,\n", | |
| " H=opt.H,\n", | |
| " n_samples=opt.n_samples,\n", | |
| " scale=opt.scale,\n", | |
| " init_img=opt.init_img,\n", | |
| " init_img_dir=opt.init_img_dir\n", | |
| " plms=opt.plms\n", | |
| " n_rows=opt.n_rows,\n", | |
| " from_file=opt.from_file\n", | |
| " strength=opt.strength\n", | |
| " precision=opt.precision\n", | |
| " skip_save=opt.skip_save\n", | |
| " skip_grid=opt.skip_grid\n", | |
| " seed = opt.seed\n", | |
| " # opt = parser.parse_args()\n", | |
| " # seed_everything(opt.seed)\n", | |
| "\n", | |
| " config = OmegaConf.load(\"/content/stable-diffusion/configs/stable-diffusion/v1-inference.yaml\") # TODO: Optionally download from same location as ckpt and chnage this logic\n", | |
| " model = load_model_from_config(config, \"/content/gdrive/MyDrive/SD/sd-v1-3-full-ema/sd-v1-3-full-ema.ckpt\") # TODO: check path\n", | |
| "\n", | |
| " device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", | |
| " model = model.to(device)\n", | |
| "\n", | |
| " if opt.plms:\n", | |
| " raise NotImplementedError(\"PLMS sampler not (yet) supported\")\n", | |
| " sampler = PLMSSampler(model)\n", | |
| " else:\n", | |
| " sampler = DDIMSampler(model)\n", | |
| "\n", | |
| " os.makedirs(opt.outdir, exist_ok=True)\n", | |
| " outpath = opt.outdir\n", | |
| "\n", | |
| " batch_size = opt.n_samples\n", | |
| " n_rows = opt.n_rows if opt.n_rows > 0 else batch_size\n", | |
| " if not opt.from_file:\n", | |
| " prompt = opt.prompt\n", | |
| " assert prompt is not None\n", | |
| " data = [batch_size * [prompt]]\n", | |
| "\n", | |
| " else:\n", | |
| " print(f\"reading prompts from {opt.from_file}\")\n", | |
| " with open(opt.from_file, \"r\") as f:\n", | |
| " data = f.read().splitlines()\n", | |
| " data = list(chunk(data, batch_size))\n", | |
| "\n", | |
| " sample_path = os.path.join(outpath, \"samples\")\n", | |
| " os.makedirs(sample_path, exist_ok=True)\n", | |
| " base_count = len(os.listdir(sample_path))\n", | |
| " grid_count = len(os.listdir(outpath)) - 1\n", | |
| "\n", | |
| " \n", | |
| " sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)\n", | |
| "\n", | |
| " assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'\n", | |
| " t_enc = int(opt.strength * opt.ddim_steps)\n", | |
| " print(f\"target t_enc is {t_enc} steps\")\n", | |
| "\n", | |
| " precision_scope = autocast if opt.precision == \"autocast\" else nullcontext\n", | |
| " with torch.no_grad():\n", | |
| " with precision_scope(\"cuda\"):\n", | |
| " with model.ema_scope():\n", | |
| " tic = time.time()\n", | |
| " all_samples = list()\n", | |
| " for n in trange(opt.n_iter, desc=\"Sampling\"):\n", | |
| " current_input_frame=f\"{opt.init_img_dir}/{n+1:04}.png\"\n", | |
| " # /content/gdrive/MyDrive/SD/IN/A/098.0.png\n", | |
| " #current_input_frame=f\"/content/gdrive/MyDrive/SD/IN/A/{n+1:03}.0.png\"\n", | |
| " assert os.path.isfile(current_input_frame)\n", | |
| "\n", | |
| " init_image_ref = load_img(current_input_frame).to(device)\n", | |
| " init_image = repeat(init_image_ref, '1 ... -> b ...', b=batch_size)\n", | |
| " \n", | |
| " init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space\n", | |
| "\n", | |
| " #BLEND PREV FRAME\n", | |
| " #UNCOMMENT TO BLEND\n", | |
| " # if n>1:\n", | |
| " # previous_input_frame= f\"{opt.init_img_dir}/{n:04}.jpg\"\n", | |
| " # assert os.path.isfile(previous_input_frame)\n", | |
| " # _prev_frame_ref = load_img(previous_input_frame).to(device)\n", | |
| " # init_image = blend_frames(init_image, _prev_frame_ref, strength=strength)\n", | |
| " # init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space\n", | |
| "\n", | |
| "\n", | |
| " for prompts in tqdm(data, desc=\"data\"):\n", | |
| " uc = None\n", | |
| " if opt.scale != 1.0:\n", | |
| " uc = model.get_learned_conditioning(batch_size * [\"\"])\n", | |
| " if isinstance(prompts, tuple):\n", | |
| " prompts = list(prompts)\n", | |
| " c = model.get_learned_conditioning(prompts)\n", | |
| "\n", | |
| " # encode (scaled latent)\n", | |
| " z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc]*batch_size).to(device))\n", | |
| " # decode it\n", | |
| " samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,\n", | |
| " unconditional_conditioning=uc,)\n", | |
| "\n", | |
| " x_samples = model.decode_first_stage(samples)\n", | |
| " x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n", | |
| "\n", | |
| " if not opt.skip_save:\n", | |
| " for x_sample in x_samples:\n", | |
| " x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')\n", | |
| " #cv2_imshow(Image.fromarray(x_sample.astype(np.uint8)))\n", | |
| " Image.fromarray(x_sample.astype(np.uint8)).save(\n", | |
| " os.path.join(sample_path, f\"{base_count:05}.jpg\"))\n", | |
| " base_count += 1\n", | |
| " all_samples.append(x_samples)\n", | |
| "\n", | |
| " if not opt.skip_grid:\n", | |
| " # additionally, save as grid\n", | |
| " grid = torch.stack(all_samples, 0)\n", | |
| " grid = rearrange(grid, 'n b c h w -> (n b) c h w')\n", | |
| " grid = make_grid(grid, nrow=n_rows)\n", | |
| "\n", | |
| " # to image\n", | |
| " grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()\n", | |
| " Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))\n", | |
| " grid_count += 1\n", | |
| "\n", | |
| " toc = time.time()\n", | |
| "\n", | |
| " print(f\"Your samples are ready and waiting for you here: \\n{outpath} \\n\"\n", | |
| " f\" \\nEnjoy.\")\n", | |
| " \n", | |
| "def blend_frames(frame1, frame2, strength=0.5):\n", | |
| " return (1-strength)*frame1 + strength*frame2\n", | |
| " \n" | |
| ] | |
| }, | |
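| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "The frame-blending branch inside `run()` is commented out. The sketch below shows, in isolation, what it does: mix the current init frame with the previous one before encoding, which can reduce flicker between frames. The two file names are placeholders - point them at two consecutive frames from `init_img_dir`." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Preview frame blending (optional sketch)\n", | |
| "# placeholder paths - replace with two consecutive frames from init_img_dir\n", | |
| "frame_prev = load_img(f\"{init_img_dir}/0001.png\")\n", | |
| "frame_curr = load_img(f\"{init_img_dir}/0002.png\")\n", | |
| "blended = blend_frames(frame_curr, frame_prev, strength=0.3) # 30% previous frame\n", | |
| "# load_img returns a (1, 3, H, W) tensor in [-1, 1]; convert to HWC uint8 for display\n", | |
| "img = ((blended[0] + 1.0) / 2.0).clamp(0, 1).numpy().transpose(1, 2, 0)\n", | |
| "cv2_imshow((img * 255).astype(np.uint8)[:, :, ::-1]) # cv2_imshow expects BGR" | |
| ] | |
| }, | |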
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": { | |
| "colab": { | |
| "base_uri": "https://localhost:8080/", | |
| "height": 1000 | |
| }, | |
| "id": "s1ywvU-5LHC_", | |
| "outputId": "61557c4f-0186-42fc-a967-c303173833c4" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "for i in range(4000):\n", | |
| " args = argparse.Namespace(\n", | |
| " prompt = prompt\n", | |
| " outdir=outdir\n", | |
| " ddim_steps = ddim_steps,\n", | |
| " ddim_eta = ddim_eta,\n", | |
| " n_iter = n_iter,\n", | |
| " W=W,\n", | |
| " H=H,\n", | |
| " n_samples=n_samples,\n", | |
| " scale=scale,\n", | |
| " init_img_dir=init_img_dir,\n", | |
| " init_img=init_img\n", | |
| " plms=plms,\n", | |
| " n_rows=n_rows,\n", | |
| " from_file=from_file,\n", | |
| " strength=.strength,\n", | |
| " precision=precision,\n", | |
| " skip_save=skip_save,\n", | |
| " skip_grid=skip_grid,\n", | |
| " seed=seed\n", | |
| " )\n", | |
| " run(args)" | |
| ] | |
| }, | |
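| { | |
| "cell_type": "markdown", | |
| "metadata": {}, | |
| "source": [ | |
| "Once the loop above has written frames, they can be stitched back into a video. This is an added convenience cell, not part of the original gist: it assumes the default save pattern used in `run()` (`samples/00000.jpg`, `00001.jpg`, ...) and an arbitrary frame rate of 12 fps - adjust to match how the input frames were extracted. Colab images ship with an ffmpeg binary." | |
| ] | |
| }, | |
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": {}, | |
| "outputs": [], | |
| "source": [ | |
| "#@title Stitch output frames into a video (optional)\n", | |
| "fps = 12 #@param {type:\"number\"}\n", | |
| "!ffmpeg -y -framerate {fps} -start_number 0 -i \"{outdir}/samples/%05d.jpg\" -c:v libx264 -pix_fmt yuv420p \"{outdir}/out.mp4\"" | |
| ] | |
| }, | |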
| { | |
| "cell_type": "code", | |
| "execution_count": null, | |
| "metadata": { | |
| "id": "l1BtLj8SBTay" | |
| }, | |
| "outputs": [], | |
| "source": [ | |
| "#Extras for YT-DL\n", | |
| "\n", | |
| "\n", | |
| "%pip install yt-dlp\n", | |
| "%pip install ffmpeg\n", | |
| "\n", | |
| "%sudo curl -L https://yt-dl.org/downloads/latest/youtube-dl -o /usr/local/bin/youtube-dl\n", | |
| "%sudo chmod a+rx /usr/local/bin/youtube-dl\n", | |
| "\n", | |
| "import os\n", | |
| "import subprocess\n", | |
| "import sys\n", | |
| "import cv2\n", | |
| "\n", | |
| "\n", | |
| "\n", | |
| "url = 'YT LINK'\n", | |
| "drive__output_path = '/content/gdrive/MyDrive/TYDL_OUT/'\n", | |
| "def download_video(url, path):\n", | |
| " out1 = subprocess.check_output('youtube-dl -f mp4 ' + url + ' -o ' + path + '.mp4',shell=True)\n", | |
| " print(out1)\n", | |
| " return path\n", | |
| "\n", | |
| "import cv2\n", | |
| "def extract_every_nth_frame(video,path, n):\n", | |
| " cap = cv2.VideoCapture(video)\n", | |
| " i = 0\n", | |
| " #remove all existing png files in dir\n", | |
| " for the_file in os.listdir(path):\n", | |
| " file_path = os.path.join(path, the_file)\n", | |
| " try:\n", | |
| " if os.path.isfile(file_path):\n", | |
| " os.unlink(file_path)\n", | |
| " except Exception as e:\n", | |
| " print(e)\n", | |
| " \n", | |
| " while(cap.isOpened()):\n", | |
| " ret, frame = cap.read()\n", | |
| " if ret == False:\n", | |
| " break\n", | |
| " if i % n == 0:\n", | |
| " cv2.imwrite(f'{path}/{1+(i/n):05}' + '.png', frame)\n", | |
| " i += 1\n", | |
| " cap.release()\n", | |
| " cv2.destroyAllWindows()\n", | |
| "\n", | |
| "\n", | |
| "extract_every_nth_frame(download_video(url), 3)\n" | |
| ] | |
| } | |
| ], | |
| "metadata": { | |
| "accelerator": "GPU", | |
| "colab": { | |
| "collapsed_sections": [], | |
| "machine_shape": "hm", | |
| "name": "StableDiff_2", | |
| "provenance": [] | |
| }, | |
| "gpuClass": "standard", | |
| "kernelspec": { | |
| "display_name": "Python 3.10.2 64-bit", | |
| "language": "python", | |
| "name": "python3" | |
| }, | |
| "language_info": { | |
| "name": "python", | |
| "version": "3.10.2" | |
| }, | |
| "vscode": { | |
| "interpreter": { | |
| "hash": "6a57af3429fe39f1db95ae52500f0dc20b0d5b033a19c06462446e07082bc71c" | |
| } | |
| }, | |
| "widgets": { | |
| "application/vnd.jupyter.widget-state+json": { | |
| "1a862bf1077a41029a3c6934fc306c24": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "HTMLModel", | |
| "state": { | |
| "_dom_classes": [], | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "HTMLModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/controls", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "HTMLView", | |
| "description": "", | |
| "description_allow_html": false, | |
| "layout": "IPY_MODEL_a81d0ff2fad64602b9d7ec04692e082d", | |
| "placeholder": "", | |
| "style": "IPY_MODEL_26030a13461d420586db56729a6f939f", | |
| "tabbable": null, | |
| "tooltip": null, | |
| "value": " 4/420 [05:30<9:21:05, 80.93s/it]" | |
| } | |
| }, | |
| "26030a13461d420586db56729a6f939f": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "HTMLStyleModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "HTMLStyleModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "StyleView", | |
| "background": null, | |
| "description_width": "", | |
| "font_size": null, | |
| "text_color": null | |
| } | |
| }, | |
| "3e8a7e84e83e4846b2df78df756b5f5b": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "HBoxModel", | |
| "state": { | |
| "_dom_classes": [], | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "HBoxModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/controls", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "HBoxView", | |
| "box_style": "", | |
| "children": [ | |
| "IPY_MODEL_a644e33e55674207929f1b88fd93d9bf", | |
| "IPY_MODEL_6a78d0ade9b846a3887f68fca592fb16", | |
| "IPY_MODEL_1a862bf1077a41029a3c6934fc306c24" | |
| ], | |
| "layout": "IPY_MODEL_c413cbf82d5441a79a2636fc49d090ac", | |
| "tabbable": null, | |
| "tooltip": null | |
| } | |
| }, | |
| "5869dca9f06242c3af67bfc2ce980159": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "HTMLStyleModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "HTMLStyleModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "StyleView", | |
| "background": null, | |
| "description_width": "", | |
| "font_size": null, | |
| "text_color": null | |
| } | |
| }, | |
| "6a78d0ade9b846a3887f68fca592fb16": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "FloatProgressModel", | |
| "state": { | |
| "_dom_classes": [], | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "FloatProgressModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/controls", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "ProgressView", | |
| "bar_style": "danger", | |
| "description": "", | |
| "description_allow_html": false, | |
| "layout": "IPY_MODEL_df608b7bf0084ebca2b3601147d1fa1b", | |
| "max": 420, | |
| "min": 0, | |
| "orientation": "horizontal", | |
| "style": "IPY_MODEL_9ec62b96d900460a8e43e0dcaf99c975", | |
| "tabbable": null, | |
| "tooltip": null, | |
| "value": 4 | |
| } | |
| }, | |
| "9ec62b96d900460a8e43e0dcaf99c975": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "ProgressStyleModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "ProgressStyleModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "StyleView", | |
| "bar_color": null, | |
| "description_width": "" | |
| } | |
| }, | |
| "a644e33e55674207929f1b88fd93d9bf": { | |
| "model_module": "@jupyter-widgets/controls", | |
| "model_module_version": "2.0.0", | |
| "model_name": "HTMLModel", | |
| "state": { | |
| "_dom_classes": [], | |
| "_model_module": "@jupyter-widgets/controls", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "HTMLModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/controls", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "HTMLView", | |
| "description": "", | |
| "description_allow_html": false, | |
| "layout": "IPY_MODEL_f1723874edc140dd970191fac8d1d45d", | |
| "placeholder": "", | |
| "style": "IPY_MODEL_5869dca9f06242c3af67bfc2ce980159", | |
| "tabbable": null, | |
| "tooltip": null, | |
| "value": "Sampling: 1%" | |
| } | |
| }, | |
| "a81d0ff2fad64602b9d7ec04692e082d": { | |
| "model_module": "@jupyter-widgets/base", | |
| "model_module_version": "2.0.0", | |
| "model_name": "LayoutModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/base", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "LayoutModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "LayoutView", | |
| "align_content": null, | |
| "align_items": null, | |
| "align_self": null, | |
| "border_bottom": null, | |
| "border_left": null, | |
| "border_right": null, | |
| "border_top": null, | |
| "bottom": null, | |
| "display": null, | |
| "flex": null, | |
| "flex_flow": null, | |
| "grid_area": null, | |
| "grid_auto_columns": null, | |
| "grid_auto_flow": null, | |
| "grid_auto_rows": null, | |
| "grid_column": null, | |
| "grid_gap": null, | |
| "grid_row": null, | |
| "grid_template_areas": null, | |
| "grid_template_columns": null, | |
| "grid_template_rows": null, | |
| "height": null, | |
| "justify_content": null, | |
| "justify_items": null, | |
| "left": null, | |
| "margin": null, | |
| "max_height": null, | |
| "max_width": null, | |
| "min_height": null, | |
| "min_width": null, | |
| "object_fit": null, | |
| "object_position": null, | |
| "order": null, | |
| "overflow": null, | |
| "padding": null, | |
| "right": null, | |
| "top": null, | |
| "visibility": null, | |
| "width": null | |
| } | |
| }, | |
| "c413cbf82d5441a79a2636fc49d090ac": { | |
| "model_module": "@jupyter-widgets/base", | |
| "model_module_version": "2.0.0", | |
| "model_name": "LayoutModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/base", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "LayoutModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "LayoutView", | |
| "align_content": null, | |
| "align_items": null, | |
| "align_self": null, | |
| "border_bottom": null, | |
| "border_left": null, | |
| "border_right": null, | |
| "border_top": null, | |
| "bottom": null, | |
| "display": null, | |
| "flex": null, | |
| "flex_flow": null, | |
| "grid_area": null, | |
| "grid_auto_columns": null, | |
| "grid_auto_flow": null, | |
| "grid_auto_rows": null, | |
| "grid_column": null, | |
| "grid_gap": null, | |
| "grid_row": null, | |
| "grid_template_areas": null, | |
| "grid_template_columns": null, | |
| "grid_template_rows": null, | |
| "height": null, | |
| "justify_content": null, | |
| "justify_items": null, | |
| "left": null, | |
| "margin": null, | |
| "max_height": null, | |
| "max_width": null, | |
| "min_height": null, | |
| "min_width": null, | |
| "object_fit": null, | |
| "object_position": null, | |
| "order": null, | |
| "overflow": null, | |
| "padding": null, | |
| "right": null, | |
| "top": null, | |
| "visibility": null, | |
| "width": null | |
| } | |
| }, | |
| "df608b7bf0084ebca2b3601147d1fa1b": { | |
| "model_module": "@jupyter-widgets/base", | |
| "model_module_version": "2.0.0", | |
| "model_name": "LayoutModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/base", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "LayoutModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "LayoutView", | |
| "align_content": null, | |
| "align_items": null, | |
| "align_self": null, | |
| "border_bottom": null, | |
| "border_left": null, | |
| "border_right": null, | |
| "border_top": null, | |
| "bottom": null, | |
| "display": null, | |
| "flex": null, | |
| "flex_flow": null, | |
| "grid_area": null, | |
| "grid_auto_columns": null, | |
| "grid_auto_flow": null, | |
| "grid_auto_rows": null, | |
| "grid_column": null, | |
| "grid_gap": null, | |
| "grid_row": null, | |
| "grid_template_areas": null, | |
| "grid_template_columns": null, | |
| "grid_template_rows": null, | |
| "height": null, | |
| "justify_content": null, | |
| "justify_items": null, | |
| "left": null, | |
| "margin": null, | |
| "max_height": null, | |
| "max_width": null, | |
| "min_height": null, | |
| "min_width": null, | |
| "object_fit": null, | |
| "object_position": null, | |
| "order": null, | |
| "overflow": null, | |
| "padding": null, | |
| "right": null, | |
| "top": null, | |
| "visibility": null, | |
| "width": null | |
| } | |
| }, | |
| "f1723874edc140dd970191fac8d1d45d": { | |
| "model_module": "@jupyter-widgets/base", | |
| "model_module_version": "2.0.0", | |
| "model_name": "LayoutModel", | |
| "state": { | |
| "_model_module": "@jupyter-widgets/base", | |
| "_model_module_version": "2.0.0", | |
| "_model_name": "LayoutModel", | |
| "_view_count": null, | |
| "_view_module": "@jupyter-widgets/base", | |
| "_view_module_version": "2.0.0", | |
| "_view_name": "LayoutView", | |
| "align_content": null, | |
| "align_items": null, | |
| "align_self": null, | |
| "border_bottom": null, | |
| "border_left": null, | |
| "border_right": null, | |
| "border_top": null, | |
| "bottom": null, | |
| "display": null, | |
| "flex": null, | |
| "flex_flow": null, | |
| "grid_area": null, | |
| "grid_auto_columns": null, | |
| "grid_auto_flow": null, | |
| "grid_auto_rows": null, | |
| "grid_column": null, | |
| "grid_gap": null, | |
| "grid_row": null, | |
| "grid_template_areas": null, | |
| "grid_template_columns": null, | |
| "grid_template_rows": null, | |
| "height": null, | |
| "justify_content": null, | |
| "justify_items": null, | |
| "left": null, | |
| "margin": null, | |
| "max_height": null, | |
| "max_width": null, | |
| "min_height": null, | |
| "min_width": null, | |
| "object_fit": null, | |
| "object_position": null, | |
| "order": null, | |
| "overflow": null, | |
| "padding": null, | |
| "right": null, | |
| "top": null, | |
| "visibility": null, | |
| "width": null | |
| } | |
| } | |
| } | |
| } | |
| }, | |
| "nbformat": 4, | |
| "nbformat_minor": 0 | |
| } |