Sunday, June 6, 2021

DeepDream

1. In InceptionNet there are 11 layers ("mixed0" through "mixed10") where convolution
outputs are concatenated. Exciting these layers in different combinations produces
different outputs, so any of them can be used in the layers array.
In general, deeper layers respond to higher-level extracted features, so images
generated by exciting deep layers look different from those generated by shallow layers.

2. A note on the tutorial's method run_deep_dream_simple:

The statement

steps_remaining -= run_steps

may look like it terminates the loop after a single pass, but it is actually correct:
each call to deepdream() performs run_steps gradient-ascent steps internally, so the
outer loop must subtract the whole chunk (up to 100 steps) per iteration — subtracting
only 1 would run roughly 100x more steps than requested.

3. Another important point to be noted is we are using GRADIENT ASCENT , not DESCENT.
---------------------------------------------------------------------------------------
import tensorflow as tf 
from tensorflow import keras 
import PIL 
import IPython.display as display 
import numpy as np 

url = "https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg"

def download(url , max_dim = None) :
  """Fetch an image from `url` and return it as a numpy array.

  Args:
    url: direct URL of the image file.
    max_dim: if truthy, shrink the image in place so its longest side
      is at most `max_dim` pixels (aspect ratio preserved).

  Returns:
    The (possibly resized) image as a numpy array.
  """
  filename = url.split("/")[-1]
  # get_file caches the download under the Keras cache directory.
  local_path = tf.keras.utils.get_file(filename, origin = url)

  image = PIL.Image.open(local_path)
  if max_dim :
    # thumbnail() resizes in place, never enlarging the image.
    image.thumbnail((max_dim , max_dim))
  return np.array(image)

def deprocess(img)  :
  """Map Inception-preprocessed pixels from [-1, 1] back to uint8 [0, 255]."""
  rescaled = 255 * (img + 1.0) / 2.0
  return tf.cast(rescaled , tf.uint8)

def show(img) :
  """Render `img` (array or tensor) inline in the notebook."""
  pil_image = PIL.Image.fromarray(np.array(img))
  display.display(pil_image)


# Download the demo image (longest side capped at 500 px) and preview it.
original_image = download(url , max_dim = 500)
show(original_image)
display.display(display.HTML("image"))


# Feature-extraction backbone: InceptionV3 without its classification head.
base_model = tf.keras.applications.InceptionV3(include_top= False , weights = "imagenet")
# Layers whose activations we maximize; any of "mixed0".."mixed10" (the
# concatenation layers in InceptionV3) can be used here.
names = ["mixed3" , "mixed5"]
layers = [base_model.get_layer(name).output for name in names]

# Model mapping an input image to the selected intermediate activations.
dream_model = tf.keras.Model(inputs = base_model.input , outputs = layers)


def calc_loss(model , img) :
  """Return the DeepDream loss: summed mean activation of each chosen layer.

  Args:
    model: Keras model mapping an image batch to one activation tensor or a
      list of activation tensors (one per selected layer).
    img: single image tensor of shape (H, W, 3); a batch axis is added here.

  Returns:
    Scalar tensor — the sum over layers of the mean activation. Gradient
    ASCENT on this value excites the selected layers.
  """
  # BUG FIX: original was missing the closing parenthesis on expand_dims.
  img_batch = tf.expand_dims(img , axis = 0)
  activations = model(img_batch)
  # A single-output model returns a bare tensor; normalize it to a list.
  if len(activations) == 1 :
    activations = [activations]

  losses = []
  for act in activations :
    losses.append(tf.reduce_mean(act))

  # BUG FIX: original returned reduce_sum(loss) — only the LAST layer's
  # mean — instead of summing the per-layer losses.
  return tf.reduce_sum(losses)

class DeepDream(tf.Module) :
  """tf.Module wrapping one compiled gradient-ASCENT loop over the image."""

  def __init__(self , model) :
    # model: the dream model mapping an image batch to layer activations.
    self.model = model

  @tf.function(
      input_signature = (
                         tf.TensorSpec(shape = [None , None,3] , dtype = tf.float32) ,
                         tf.TensorSpec(shape = [] , dtype = tf.int32) ,
                         tf.TensorSpec(shape = [] , dtype = tf.float32) ,
      )
  )
  def __call__(self , img , steps , step_size)   :
    """Run `steps` gradient-ascent updates on `img`.

    Args:
      img: float32 image tensor (H, W, 3) with values in [-1, 1].
      steps: number of ascent steps to perform in this call.
      step_size: scalar step size for each update.

    Returns:
      (loss, img): the final loss value and the updated image.
    """
    loss = tf.constant(0.0)

    for  n in range(steps) :
      with tf.GradientTape() as tape :
        # img is a plain tensor, not a Variable, so watch it explicitly.
        tape.watch(img)
        loss  = calc_loss(self.model, img)

      gradients = tape.gradient(loss , img)
      # BUG FIX: the epsilon belongs INSIDE the divisor. The original
      # computed (gradients / std) + 1e-8, which both fails to guard
      # against a zero std and shifts every gradient by 1e-8.
      gradients = gradients / (tf.math.reduce_std(gradients) + 1e-8)

      # Gradient ASCENT: step in the direction that INCREASES the loss.
      img = img + gradients * step_size
      img = tf.clip_by_value(img , -1 , 1)
    return loss , img

deepdream = DeepDream(dream_model)    

def run_deep_dream_simple(img , steps = 100 , step_size = 0.01) :
  """Run DeepDream on `img` for a total of `steps` gradient-ascent steps.

  The compiled deepdream() call is invoked in chunks of at most 100 steps
  so intermediate progress can be displayed between chunks.

  Args:
    img: uint8 image array (H, W, 3) with values in [0, 255].
    steps: total number of gradient-ascent steps to run.
    step_size: size of each ascent step.

  Returns:
    The dreamed image as a uint8 tensor.
  """
  # Map pixels into the [-1, 1] range InceptionV3 was trained on.
  img = tf.keras.applications.inception_v3.preprocess_input(img)
  img = tf.convert_to_tensor(img)
  step_size = tf.convert_to_tensor(step_size)

  steps_remaining = steps
  step = 0
  while steps_remaining :
    # BUG FIX: in the original, the bookkeeping and the deepdream() call
    # were indented inside the `else` branch, so nothing ever ran when
    # steps_remaining > 100 (infinite loop), and the counter was reduced
    # by 1 instead of by the chunk size. Each deepdream() call performs
    # run_steps steps internally, so subtract the whole chunk.
    run_steps = min(steps_remaining , 100)
    steps_remaining -= run_steps
    step += run_steps

    loss , img = deepdream(img , tf.constant(run_steps) , tf.constant(step_size))

    # Show intermediate progress after each chunk.
    show(deprocess(img))
    # BUG FIX: original read `print"..."` — missing the opening parenthesis.
    print("Step {} , loss {} ".format(step , loss))

  result = deprocess(img)
  show(result)

  return result


dream_img = run_deep_dream_simple(img = original_image , steps = 100 , step_size = 0.01)

No comments:

Post a Comment

 using Microsoft.AspNetCore.Mvc; using System.Xml.Linq; using System.Xml.XPath; //<table class="common-table medium js-table js-stre...