More actions
imported>rabierre ({CREATE}) |
No edit summary |
||
| Line 2: | Line 2: | ||
== 내용 == | == 내용 == | ||
=== 코드 === | === 코드 === | ||
import tensorflow as tf | |||
# AND OR NXOR XOR | |||
# (0, 0) => 0 (0, 0) => 0 (0, 0) => 1 (0, 0) => 0 | |||
# (0, 1) => 0 (0, 1) => 1 (0, 1) => 0 (0, 1) => 1 | |||
# (1, 0) => 0 (1, 0) => 1 (1, 0) => 0 (1, 0) => 1 | |||
# (1, 1) => 1 (1, 1) => 1 (1, 1) => 1 (1, 1) => 0 | |||
W1 = tf.Variable(tf.random_uniform([2, 3])) | |||
b1 = tf.Variable(tf.random_uniform([3])) | |||
W2 = tf.Variable(tf.random_uniform([3, 2])) | |||
b2 = tf.Variable(tf.random_uniform([2])) | |||
W3 = tf.Variable(tf.random_uniform([2, 1])) | |||
b3 = tf.Variable(tf.random_uniform([1])) | |||
def logic_gate(x): | |||
hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1) | |||
hidden2 = tf.nn.relu(tf.matmul(hidden1, W2) + b2) | |||
return tf.nn.sigmoid(tf.matmul(hidden2, W3) + b3) | |||
x = tf.placeholder("float", [None, 2]) | |||
y = tf.placeholder("float", [None, 1]) | |||
value = logic_gate(x) | |||
loss = -tf.reduce_mean((y*tf.log(value) + (1-y)*tf.log(1-value))) | |||
optimize = tf.train.AdagradOptimizer(0.01).minimize(loss) | |||
init = tf.initialize_all_variables() | |||
with tf.Session() as sess: | |||
sess.run(init) | |||
for i in range(30001): | |||
result = sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}) | |||
if (i % 1000 == 0): | |||
print("Epoch: ", i) | |||
print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})) | |||
== 후기 == | == 후기 == | ||
* [[서지혜]]: relu 좋은 거 같음. 튜닝 방법 일일이 값 바꾸는 것 뿐인가,, | |||
== 다음 시간에는 == | == 다음 시간에는 == | ||
== 더 보기 == | == 더 보기 == | ||
Revision as of 06:58, 11 June 2016
[[pagelist(^(머신러닝스터디/2016))]]
내용
코드
import tensorflow as tf

# Train a tiny MLP (2 -> 3 -> 2 -> 1) to learn the NXOR (XNOR) truth table.
# Truth tables for reference:
#   AND          OR           NXOR         XOR
#   (0,0) => 0   (0,0) => 0   (0,0) => 1   (0,0) => 0
#   (0,1) => 0   (0,1) => 1   (0,1) => 0   (0,1) => 1
#   (1,0) => 0   (1,0) => 1   (1,0) => 0   (1,0) => 1
#   (1,1) => 1   (1,1) => 1   (1,1) => 1   (1,1) => 0
# The training labels below ([1, 0, 0, 1]) are the NXOR column.

# Weights and biases for each layer, uniform random initialization in [0, 1).
W1 = tf.Variable(tf.random_uniform([2, 3]))
b1 = tf.Variable(tf.random_uniform([3]))
W2 = tf.Variable(tf.random_uniform([3, 2]))
b2 = tf.Variable(tf.random_uniform([2]))
W3 = tf.Variable(tf.random_uniform([2, 1]))
b3 = tf.Variable(tf.random_uniform([1]))


def logic_gate(x):
    """Forward pass: two ReLU hidden layers, sigmoid output in (0, 1).

    x is a float tensor of shape [batch, 2]; returns shape [batch, 1].
    """
    hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
    hidden2 = tf.nn.relu(tf.matmul(hidden1, W2) + b2)
    return tf.nn.sigmoid(tf.matmul(hidden2, W3) + b3)


x = tf.placeholder("float", [None, 2])
y = tf.placeholder("float", [None, 1])
value = logic_gate(x)

# Binary cross-entropy. Clip the sigmoid output away from exactly 0 and 1
# so tf.log never sees 0 -- a saturated output would otherwise make the
# loss NaN and silently stop learning.
eps = 1e-7
clipped = tf.clip_by_value(value, eps, 1.0 - eps)
loss = -tf.reduce_mean(y * tf.log(clipped) + (1 - y) * tf.log(1 - clipped))
optimize = tf.train.AdagradOptimizer(0.01).minimize(loss)
init = tf.initialize_all_variables()

# All four input pairs with their NXOR labels; the feed is loop-invariant,
# so build the dict once instead of on every iteration.
feed = {x: [[0, 0], [0, 1], [1, 0], [1, 1]],
        y: [[1], [0], [0], [1]]}

with tf.Session() as sess:
    sess.run(init)
    for i in range(30001):
        # One full-batch gradient step; the op fetch has no useful result.
        sess.run(optimize, feed_dict=feed)
        if i % 1000 == 0:
            print("Epoch: ", i)
            print(sess.run([value, loss], feed_dict=feed))
후기
- 서지혜: relu 좋은 거 같음. 튜닝 방법 일일이 값 바꾸는 것 뿐인가,,