import tensorflow as tf
#     AND             OR              NXOR            XOR
# (0, 0) => 0     (0, 0) => 0     (0, 0) => 1     (0, 0) => 0
# (0, 1) => 0     (0, 1) => 1     (0, 1) => 0     (0, 1) => 1
# (1, 0) => 0     (1, 0) => 1     (1, 0) => 0     (1, 0) => 1
# (1, 1) => 1     (1, 1) => 1     (1, 1) => 1     (1, 1) => 0
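
# XOR and NXOR are not linearly separable, so a single-layer perceptron
# cannot learn them; the two-unit hidden layer built below gives the network
# the capacity to. The targets fed in at the bottom ([1, 0, 0, 1]) are the
# NXOR column of the table above.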
W1 = tf.Variable(tf.random_uniform([2, 2]))
b1 = tf.Variable(tf.random_uniform([2]))
W2 = tf.Variable(tf.random_uniform([2, 1]))
b2 = tf.Variable(tf.random_uniform([1]))
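# W1/b1 map the 2 inputs to 2 hidden units; W2/b2 map those to the single output.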
def logic_gate(x):
    hidden = tf.sigmoid(tf.matmul(x, W1) + b1)
    return tf.sigmoid(tf.matmul(hidden, W2) + b2)
x = tf.placeholder("float", [None, 2])
y = tf.placeholder("float", [None, 1])
value = logic_gate(x)
loss = tf.reduce_sum(tf.pow(y-value, 2))
# loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(value), reduction_indices=1))  # doesn't work (see below)
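# The commented-out loss above is the softmax-style cross-entropy from the
# MNIST tutorial. With a single sigmoid output it only penalises rows where
# y = 1, so the y = 0 cases are never pushed towards 0 (and log(value) can
# blow up as value approaches 0). A binary cross-entropy that covers both
# cases might look like this (a sketch, left commented out like the line
# above; tf.clip_by_value guards against log(0)):
# loss = -tf.reduce_mean(y * tf.log(tf.clip_by_value(value, 1e-10, 1.0))
#                        + (1 - y) * tf.log(tf.clip_by_value(1 - value, 1e-10, 1.0)))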
optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(30001):
        sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})
        if i % 1000 == 0:
            print("Epoch: ", i)
            print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}))
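    # Optional final check (a sketch, not part of the original script):
    # threshold the sigmoid outputs at 0.5 to get hard 0/1 NXOR predictions.
    predictions = sess.run(value, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]]})
    print([[1 if p > 0.5 else 0 for p in row] for row in predictions])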