<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://mediawiki.zeropage.org/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=219.241.59.131</id>
	<title>ZeroWiki - User contributions [en]</title>
	<link rel="self" type="application/atom+xml" href="https://mediawiki.zeropage.org/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=219.241.59.131"/>
	<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php/Special:Contributions/219.241.59.131"/>
	<updated>2026-05-15T10:15:37Z</updated>
	<subtitle>User contributions</subtitle>
	<generator>MediaWiki 1.39.8</generator>
	<entry>
		<id>https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_14&amp;diff=50259</id>
		<title>머신러닝스터디/2016/2016 05 14</title>
		<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_14&amp;diff=50259"/>
		<updated>2016-05-14T11:25:02Z</updated>

		<summary type="html">&lt;p&gt;219.241.59.131: {CREATE}&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&amp;amp;#91;&amp;amp;#91;pagelist(^(머신러닝스터디/2016))&amp;amp;#93;&amp;amp;#93;&lt;br /&gt;
&lt;br /&gt;
== 내용 ==&lt;br /&gt;
&lt;br /&gt;
== 코드 ==&lt;br /&gt;
&lt;br /&gt;
 # python3&lt;br /&gt;
 import tensorflow as tf&lt;br /&gt;
 import input_data&lt;br /&gt;
 &lt;br /&gt;
 if __name__ == &amp;quot;__main__&amp;quot;:&lt;br /&gt;
 &lt;br /&gt;
     # Logistic Regression&lt;br /&gt;
     # use sigmoid&lt;br /&gt;
 &lt;br /&gt;
     # Input&lt;br /&gt;
     # MNIST&lt;br /&gt;
     # Cross Entropy&lt;br /&gt;
     mnist = input_data.read_data_sets(&amp;quot;MNIST_data/&amp;quot;, one_hot=True)&lt;br /&gt;
     learning_rate = 0.01&lt;br /&gt;
     # x = tf.placeholder(tf.float32, shape=(None, 28, 28))&lt;br /&gt;
     x = tf.placeholder(tf.float32, shape=(None, 28 * 28))&lt;br /&gt;
     y = tf.placeholder(tf.float32, shape=(None, 10))&lt;br /&gt;
 &lt;br /&gt;
     # x = n * (28 * 28)&lt;br /&gt;
     # weight = (28 * 28) * 10&lt;br /&gt;
     weight = tf.Variable(tf.zeros((28 * 28, 10)))&lt;br /&gt;
     bias = tf.Variable(tf.zeros((10,)))&lt;br /&gt;
 &lt;br /&gt;
     # h = n * 10&lt;br /&gt;
     h = tf.sigmoid(tf.matmul(x, weight) + bias)&lt;br /&gt;
 &lt;br /&gt;
     #cost = tf.reduce_sum(tf.add(y * tf.log(h), tf.sub(1.0, y) * tf.log(tf.sub(1.0, h)))))&lt;br /&gt;
 &lt;br /&gt;
     cost = tf.reduce_mean(-(tf.reduce_sum(y * tf.log(h), 1) + tf.reduce_sum((1. -y) * tf.log(1. -h), 1)))&lt;br /&gt;
     optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)&lt;br /&gt;
 &lt;br /&gt;
     init = tf.initialize_all_variables()&lt;br /&gt;
     batch_size = 100&lt;br /&gt;
     display_step = 1&lt;br /&gt;
     with tf.Session() as sess:&lt;br /&gt;
         sess.run(init)&lt;br /&gt;
 &lt;br /&gt;
         # Training cycle&lt;br /&gt;
         for epoch in range(10):&lt;br /&gt;
             avg_cost = 0.&lt;br /&gt;
             total_batch = int(mnist.train.num_examples/batch_size)&lt;br /&gt;
             # Loop over all batches&lt;br /&gt;
             for i in range(total_batch):&lt;br /&gt;
                 batch_xs, batch_ys = mnist.train.next_batch(batch_size)&lt;br /&gt;
                 # Fit training using batch data&lt;br /&gt;
                 sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})&lt;br /&gt;
                 # Compute average loss&lt;br /&gt;
                 avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch&lt;br /&gt;
             # Display logs per epoch step&lt;br /&gt;
             if epoch % display_step == 0:&lt;br /&gt;
                 print( &amp;quot;Epoch:&amp;quot;, &#039;%04d&#039; % (epoch+1), &amp;quot;cost=&amp;quot;, &amp;quot;{:.9f}&amp;quot;.format(avg_cost))&lt;br /&gt;
 &lt;br /&gt;
         # Test model&lt;br /&gt;
         correct_prediction = tf.equal(tf.argmax(h, 1), tf.argmax(y, 1))&lt;br /&gt;
         # Calculate accuracy&lt;br /&gt;
         accuracy = tf.reduce_mean(tf.cast(correct_prediction, &amp;quot;float&amp;quot;))&lt;br /&gt;
         print(&amp;quot;Accuracy:&amp;quot;, accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))&lt;br /&gt;
&lt;/div&gt;</summary>
		<author><name>219.241.59.131</name></author>
	</entry>
</feed>