Example Code - see Azure.Notebooks | Skillshare Projects

57

2

Example Code - see Azure.Notebooks

## https://notebooks.azure.com/anon-x3p1ga/projects/uNNaDLMC


# coding: utf-8

# In[ ]:


import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import matplotlib.pyplot as plt  # Not used — could plot a line trace of train and test accuracy at the end
import numpy as np


# In[ ]:


# Module-level dataset accumulators: X holds image arrays, y integer labels.
X = []
y = []

# Map each class-name string to its integer label.
str_to_num = {'gnome': 0, 'drone': 1}

gnome_folder = 'gnomes/'
drone_folder = 'drones/'


def create_data(folder, name):
    """Load every image in `folder` and append it to the dataset.

    Each file is forced to RGB and resized to 200x200 so every sample
    matches the [None, 200, 200, 3] input placeholder (PNG files may be
    RGBA or greyscale, which would otherwise change the channel count).

    Args:
        folder: directory containing the image files for one class.
        name: class name; must be a key of str_to_num.

    Side effects:
        Appends one (200, 200, 3) uint8 array to X and one int label to y
        per file in `folder`.
    """
    label = str_to_num[name]  # hoisted: same label for every file in the folder
    for filename in os.listdir(folder):
        image = Image.open(os.path.join(folder, filename))
        # Instance-method call replaces the original unbound
        # Image.Image.resize(image, [200, 200]) form.
        image = image.convert('RGB').resize((200, 200))
        X.append(np.array(image))
        y.append(label)


create_data(gnome_folder, 'gnome')
create_data(drone_folder, 'drone')


# In[ ]:


# Placeholders for a batch of 200x200 RGB images and their integer labels.
x_place = tf.placeholder(tf.float32, shape=[None, 200, 200, 3])
y_place = tf.placeholder(tf.int32, shape=[None])
# tf.one_hot already produces float32 by default, so the original extra
# tf.cast was redundant; request the dtype explicitly instead.
one_hot = tf.one_hot(y_place, 2, dtype=tf.float32)

# Hold out 10% of the samples for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)


# In[ ]:


# x_place already has shape [None, 200, 200, 3]; this reshape is a no-op
# kept only to preserve the module-level name.
input_layer = tf.reshape(x_place, shape=[-1, 200, 200, 3])

# Flatten each image into a single 120000-dim vector for the dense stack.
flatten = tf.reshape(input_layer, [-1, 200 * 200 * 3])
fc1 = tf.layers.dense(flatten, units=1024, activation=tf.nn.relu)
fc2 = tf.layers.dense(fc1, units=1024, activation=tf.nn.relu)
fc3 = tf.layers.dense(fc2, units=1024, activation=tf.nn.relu)
# BUG FIX: tf.layers.dropout defaults to training=False, so the original
# dropout layer never actually dropped anything. is_training defaults to
# False (evaluation and existing feed dicts behave exactly as before);
# feed {is_training: True} on training steps to enable dropout.
is_training = tf.placeholder_with_default(False, shape=[])
dropout = tf.layers.dropout(fc3, rate=0.2, training=is_training)
# Raw class scores for the 2 classes (gnome / drone); softmax is applied
# inside the loss op.
logits = tf.layers.dense(dropout, units=2)


# In[ ]:

# softmax_cross_entropy_with_logits is deprecated (the _v2 variant allows
# gradients to flow into the labels input). The labels here come straight
# from a placeholder via one_hot, so nothing backpropagates into them and
# _v2 is a drop-in, behaviour-identical replacement — this also silences
# the deprecation warning the notebook originally printed.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot))

optimiser = tf.train.AdamOptimizer()
training_op = optimiser.minimize(loss)

# Accuracy: fraction of samples whose highest logit matches the true class.
correct_pred = tf.equal(tf.argmax(one_hot, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


# In[ ]:


EPOCH_TOTAL = 100  # full passes over the training set
BATCH_SIZE = 50    # samples per gradient step

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for epoch in range(EPOCH_TOTAL):
        # Re-shuffle every epoch so mini-batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)

        for batch_start in range(0, len(X_train), BATCH_SIZE):
            batch_end = batch_start + BATCH_SIZE
            batch_X = X_train[batch_start:batch_end]
            batch_y = y_train[batch_start:batch_end]
            sess.run(training_op,
                     feed_dict={x_place: batch_X, y_place: batch_y})

        # Per-epoch evaluation on the full train and test sets.
        # NOTE(review): feeding the whole training set at once is fine for a
        # small dataset but will not scale — confirm dataset size.
        train_accuracy = sess.run(
            accuracy, feed_dict={x_place: X_train, y_place: y_train})
        test_accuracy = sess.run(
            accuracy, feed_dict={x_place: X_test, y_place: y_test})

        print("\nEpoch: {}".format(epoch))
        print("Train accuracy: {a: 0.8f}".format(a=train_accuracy))
        print("Test accuracy: {a: 0.8f}".format(a=test_accuracy))

 

Comments

Please sign in or sign up to comment.