More actions
imported>rabierre ({CREATE}) |
imported>rabierre No edit summary |
||
| Line 1: | Line 1: | ||
import tensorflow as tf | |||
# AND | |||
# (0, 0) => 0 | # (0, 0) => 0 | ||
# (0, 1) => 0 | # (0, 1) => 0 | ||
# (1, 0) => 0 | # (1, 0) => 0 | ||
# (1, 1) => 1 | # (1, 1) => 1 | ||
W1 = tf.Variable(tf.random_uniform([2, 2])) | W1 = tf.Variable(tf.random_uniform([2, 2])) | ||
b1 = tf.Variable(tf.random_uniform([2])) | b1 = tf.Variable(tf.random_uniform([2])) | ||
W2 = tf.Variable(tf.random_uniform([2, 1])) | W2 = tf.Variable(tf.random_uniform([2, 1])) | ||
b2 = tf.Variable(tf.random_uniform([1])) | b2 = tf.Variable(tf.random_uniform([1])) | ||
def logic_gate(x): | def logic_gate(x): | ||
hidden = tf.sigmoid(tf.add(tf.matmul(x, W1), b1)) | hidden = tf.sigmoid(tf.add(tf.matmul(x, W1), b1)) | ||
return tf.sigmoid(tf.add(tf.matmul(hidden, W2), b2)) | return tf.sigmoid(tf.add(tf.matmul(hidden, W2), b2)) | ||
x = tf.placeholder("float", [None, 2]) | x = tf.placeholder("float", [None, 2]) | ||
y = tf.placeholder("float", [None, 1]) | y = tf.placeholder("float", [None, 1]) | ||
value = logic_gate(x) | value = logic_gate(x) | ||
loss = tf.reduce_mean(-(y * tf.log(value) - ((1-y) * tf.log(1-value)))) | loss = tf.reduce_mean(-(y * tf.log(value) - ((1-y) * tf.log(1-value)))) | ||
optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss) | optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss) | ||
init = tf.initialize_all_variables() | init = tf.initialize_all_variables() | ||
with tf.Session() as sess: | with tf.Session() as sess: | ||
sess.run(init) | sess.run(init) | ||
Revision as of 10:22, 28 May 2016
import tensorflow as tf

# Train a tiny 2-layer (2-2-1) sigmoid network to learn the AND gate:
# (0, 0) => 0
# (0, 1) => 0
# (1, 0) => 0
# (1, 1) => 1

# Hidden-layer and output-layer parameters, randomly initialized in [0, 1).
W1 = tf.Variable(tf.random_uniform([2, 2]))
b1 = tf.Variable(tf.random_uniform([2]))
W2 = tf.Variable(tf.random_uniform([2, 1]))
b2 = tf.Variable(tf.random_uniform([1]))


def logic_gate(x):
    """Forward pass: 2-unit sigmoid hidden layer, then a single sigmoid output.

    x: float tensor of shape [batch, 2]; returns shape [batch, 1] in (0, 1).
    """
    hidden = tf.sigmoid(tf.add(tf.matmul(x, W1), b1))
    return tf.sigmoid(tf.add(tf.matmul(hidden, W2), b2))


x = tf.placeholder("float", [None, 2])
y = tf.placeholder("float", [None, 1])

value = logic_gate(x)

# Binary cross-entropy: -(y*log(v) + (1-y)*log(1-v)).
# FIX: the original used '-' between the two terms, which is not a valid
# cross-entropy (the loss could go negative and mis-guide the gradient).
loss = tf.reduce_mean(-(y * tf.log(value) + (1 - y) * tf.log(1 - value)))
optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# tf.initialize_all_variables() has been deprecated since TF 0.12.
init = tf.global_variables_initializer()

# Truth table for AND. FIX: the original fed labels [[1],[0],[0],[1]]
# (the XNOR gate), contradicting the AND comment above.
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
labels = [[0], [0], [0], [1]]

with tf.Session() as sess:
    sess.run(init)
    for i in range(30001):
        sess.run(optimize, feed_dict={x: inputs, y: labels})
        if i % 1000 == 0:  # periodic progress report
            print(i)
            print(sess.run([value, loss], feed_dict={x: inputs, y: labels}))