<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://mediawiki.zeropage.org/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=59.11.128.254</id>
	<title>ZeroWiki - User contributions [en]</title>
	<link rel="self" type="application/atom+xml" href="https://mediawiki.zeropage.org/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=59.11.128.254"/>
	<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php/Special:Contributions/59.11.128.254"/>
	<updated>2026-05-14T17:20:03Z</updated>
	<subtitle>User contributions</subtitle>
	<generator>MediaWiki 1.39.8</generator>
	<entry>
		<id>https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50275</id>
		<title>머신러닝스터디/2016/2016 05 28</title>
		<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50275"/>
		<updated>2016-05-28T11:07:40Z</updated>

		<summary type="html">&lt;p&gt;59.11.128.254: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&amp;#91;&amp;#91;pagelist(^(머신러닝스터디/2016))&amp;#93;&amp;#93;&lt;br /&gt;
== 내용 ==&lt;br /&gt;
* Basic Logix Gate만들어보자!&lt;br /&gt;
** AND, OR, NXOR, XOR&lt;br /&gt;
=== 코드 ===&lt;br /&gt;
 import tensorflow as tf&lt;br /&gt;
 # AND          OR           NXOR          XOR&lt;br /&gt;
 # (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 1  (0, 0) =&amp;gt; 0&lt;br /&gt;
 # (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1  (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1&lt;br /&gt;
 # (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1  (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1&lt;br /&gt;
 # (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 0&lt;br /&gt;
 &lt;br /&gt;
 W1 = tf.Variable(tf.random_uniform([2, 2]))&lt;br /&gt;
 b1 = tf.Variable(tf.random_uniform([2]))&lt;br /&gt;
 &lt;br /&gt;
 W2 = tf.Variable(tf.random_uniform([2, 1]))&lt;br /&gt;
 b2 = tf.Variable(tf.random_uniform([1]))&lt;br /&gt;
 &lt;br /&gt;
 def logic_gate(x):&lt;br /&gt;
     hidden = tf.sigmoid(tf.matmul(x, W1) + b1)&lt;br /&gt;
     return tf.sigmoid(tf.matmul(hidden, W2) + b2)&lt;br /&gt;
 &lt;br /&gt;
 x = tf.placeholder(&amp;quot;float&amp;quot;, [None, 2])&lt;br /&gt;
 y = tf.placeholder(&amp;quot;float&amp;quot;, [None, 1])&lt;br /&gt;
 &lt;br /&gt;
 value = logic_gate(x)&lt;br /&gt;
 loss = tf.reduce_sum(tf.pow(y-value, 2))&lt;br /&gt;
 # loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(value), reduction_indices=1)) # Don&#039;t work&lt;br /&gt;
 optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)&lt;br /&gt;
 &lt;br /&gt;
 init = tf.initialize_all_variables()&lt;br /&gt;
 &lt;br /&gt;
 with tf.Session() as sess:&lt;br /&gt;
     sess.run(init)&lt;br /&gt;
     for i in range(30001):&lt;br /&gt;
         result = sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})&lt;br /&gt;
         if (i % 1000 == 0):&lt;br /&gt;
             print(&amp;quot;Epoch: &amp;quot;, i)&lt;br /&gt;
             print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}))&lt;br /&gt;
== 후기 ==&lt;br /&gt;
== 다음 시간에는 ==&lt;br /&gt;
* ML Week 5 Back Propagation 실습&lt;br /&gt;
== 더 보기 ==&lt;br /&gt;
&lt;/div&gt;</summary>
		<author><name>59.11.128.254</name></author>
	</entry>
	<entry>
		<id>https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50274</id>
		<title>머신러닝스터디/2016/2016 05 28</title>
		<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50274"/>
		<updated>2016-05-28T11:06:50Z</updated>

		<summary type="html">&lt;p&gt;59.11.128.254: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&amp;#91;&amp;#91;pagelist(^(머신러닝스터디/2016))&amp;#93;&amp;#93;&lt;br /&gt;
== 내용 ==&lt;br /&gt;
=== 코드 ===&lt;br /&gt;
 import tensorflow as tf&lt;br /&gt;
 # AND          OR           NXOR          XOR&lt;br /&gt;
 # (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 1  (0, 0) =&amp;gt; 0&lt;br /&gt;
 # (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1  (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1&lt;br /&gt;
 # (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1  (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1&lt;br /&gt;
 # (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 0&lt;br /&gt;
 &lt;br /&gt;
 W1 = tf.Variable(tf.random_uniform([2, 2]))&lt;br /&gt;
 b1 = tf.Variable(tf.random_uniform([2]))&lt;br /&gt;
 &lt;br /&gt;
 W2 = tf.Variable(tf.random_uniform([2, 1]))&lt;br /&gt;
 b2 = tf.Variable(tf.random_uniform([1]))&lt;br /&gt;
 &lt;br /&gt;
 def logic_gate(x):&lt;br /&gt;
     hidden = tf.sigmoid(tf.matmul(x, W1) + b1)&lt;br /&gt;
     return tf.sigmoid(tf.matmul(hidden, W2) + b2)&lt;br /&gt;
 &lt;br /&gt;
 x = tf.placeholder(&amp;quot;float&amp;quot;, [None, 2])&lt;br /&gt;
 y = tf.placeholder(&amp;quot;float&amp;quot;, [None, 1])&lt;br /&gt;
 &lt;br /&gt;
 value = logic_gate(x)&lt;br /&gt;
 loss = tf.reduce_sum(tf.pow(y-value, 2))&lt;br /&gt;
 # loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(value), reduction_indices=1)) # Don&#039;t work&lt;br /&gt;
 optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)&lt;br /&gt;
 &lt;br /&gt;
 init = tf.initialize_all_variables()&lt;br /&gt;
 &lt;br /&gt;
 with tf.Session() as sess:&lt;br /&gt;
     sess.run(init)&lt;br /&gt;
     for i in range(30001):&lt;br /&gt;
         result = sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})&lt;br /&gt;
         if (i % 1000 == 0):&lt;br /&gt;
             print(&amp;quot;Epoch: &amp;quot;, i)&lt;br /&gt;
             print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}))&lt;br /&gt;
== 후기 ==&lt;br /&gt;
== 다음 시간에는 ==&lt;br /&gt;
* ML Week 5 Back Propagation 실습&lt;br /&gt;
== 더 보기 ==&lt;br /&gt;
&lt;/div&gt;</summary>
		<author><name>59.11.128.254</name></author>
	</entry>
	<entry>
		<id>https://mediawiki.zeropage.org/index.php?title=%EC%A0%95%EB%AA%A8/2016.5.18&amp;diff=72254</id>
		<title>정모/2016.5.18</title>
		<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php?title=%EC%A0%95%EB%AA%A8/2016.5.18&amp;diff=72254"/>
		<updated>2016-05-28T11:03:42Z</updated>

		<summary type="html">&lt;p&gt;59.11.128.254: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;= 예정 =&lt;br /&gt;
* [[OMS]] : 이민석 - 하스켈 성능 튜닝&lt;br /&gt;
* 자물쇠 비밀번호 변경~~!&lt;br /&gt;
* 진행 사항 공유&lt;br /&gt;
** 스터디 및 프로젝트 - [[활동지도/2016]]&lt;br /&gt;
** [[AngelsCamp/2016]]&lt;br /&gt;
* 기타&lt;br /&gt;
** 코드 레이스 - 5월 20일&lt;br /&gt;
** [https://docs.google.com/spreadsheets/d/1mpXSWXppW-9rYOlrkQ2ckp3UnTJO_UXYk707E6OY7DE/edit?usp=sharing 데블스 캠프 강사 모집중] - 6월 22일 ~ 6월 26일&lt;br /&gt;
** [http://worlditshow.co.kr/ World IT Show 2016] - 5월 17일 ~ 5월 20일&lt;br /&gt;
= 진행 =&lt;br /&gt;
&lt;br /&gt;
* [[OMS]] : 이민석 - 하스켈 성능 튜닝&lt;br /&gt;
** 온라인 저지에 하스켈을 쓰지 맙시다&lt;br /&gt;
** 다음 OMS 주자 : 장혁재 - 아름다운 렌더링 기법 몇가지&lt;br /&gt;
* 자물쇠 비밀번호 변경: 앞으로 슬랙에 공지할 예정&lt;br /&gt;
* 스터디 및 프로젝트&lt;br /&gt;
** 알고리즘 스터디&lt;br /&gt;
** Google CodeJam Round.C번 문제 풀려다가 또다시 좌절&lt;br /&gt;
** cppall&lt;br /&gt;
** lambda expression, uniform initialization 진행&lt;br /&gt;
** cppall/씨뿔뿔&lt;br /&gt;
** 진행 못함&lt;br /&gt;
** 알고하자&lt;br /&gt;
** 진행 못함&lt;br /&gt;
** SIN&lt;br /&gt;
** NN영상을 봄. 이후 공유는 X&lt;br /&gt;
** Centos7&lt;br /&gt;
** 진행자 불참&lt;br /&gt;
** 알파고&lt;br /&gt;
** 진행자 불참&lt;br /&gt;
** [[머신러닝스터디/2016/2016_05_14| Machine Learning Study]]&lt;br /&gt;
** Tensorflow를 이용한 Logistic Regression 코딩&lt;br /&gt;
** Softmax function 대신 Sigmoid와 Cross Entropy Function를 직접 코딩&lt;br /&gt;
** CC&lt;br /&gt;
** 진행자 불참&lt;br /&gt;
** Python 파보기&lt;br /&gt;
** C99, Cextream 등 학교에서 가르쳐주지 않는 것들 ~~(딴짓)~~ 함.&lt;br /&gt;
** CS&lt;br /&gt;
** C# 기본적인 구조, 변수와 자료형 진행&lt;br /&gt;
** 내용이 너무 많아서 적지 못하였습니다. 내용 요약되어 있어요.&lt;br /&gt;
** 프로랭딸러&lt;br /&gt;
** 자유롭게 알고리즘 랭킹을 올리는 스터디&lt;br /&gt;
** Wolframite&lt;br /&gt;
** 내부 구조 변경&lt;br /&gt;
** 덕분에 난이도가 좀 쉬워짐&lt;br /&gt;
** SCGI 구현을 위해 노력중&lt;br /&gt;
** 개발자가 코드가 부끄럽다 하시어 저장소가 비공개로 변경&lt;br /&gt;
&lt;br /&gt;
* Angels Camp 후기&lt;br /&gt;
** 소박하게 진행&lt;br /&gt;
** 결과물은 어느정도 완성됨&lt;br /&gt;
 &lt;br /&gt;
* devil&#039;s camp&lt;br /&gt;
** 강사모집합니다.&lt;br /&gt;
** &#039;&#039;&#039;D2에서 강사 지원이 있으니 듣고싶으신 세션이 있으시면 회장께 말해주세요. 자세한 내용은 홈페이지에 기재하겠습니다.&#039;&#039;&#039;&lt;br /&gt;
** 연락 돌렸습니다.&lt;br /&gt;
&lt;br /&gt;
 *새내기? :김남규, 이강진&lt;br /&gt;
= 후기 및 코멘트 =&lt;br /&gt;
* [[서지혜]]: 와 스터디 되게 많네요&lt;br /&gt;
* [[이민석]]: 발표 후 결국 문제를 해결하고 뒷이야기를 만들었습니다.&lt;br /&gt;
** http://www.slideshare.net/codeonwort/2-62452949&lt;br /&gt;
&lt;/div&gt;</summary>
		<author><name>59.11.128.254</name></author>
	</entry>
	<entry>
		<id>https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50273</id>
		<title>머신러닝스터디/2016/2016 05 28</title>
		<link rel="alternate" type="text/html" href="https://mediawiki.zeropage.org/index.php?title=%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D%EC%8A%A4%ED%84%B0%EB%94%94/2016/2016_05_28&amp;diff=50273"/>
		<updated>2016-05-28T10:51:42Z</updated>

		<summary type="html">&lt;p&gt;59.11.128.254: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt; import tensorflow as tf&lt;br /&gt;
 # AND          OR           NXOR          XOR&lt;br /&gt;
 # (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 0  (0, 0) =&amp;gt; 1  (0, 0) =&amp;gt; 0&lt;br /&gt;
 # (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1  (0, 1) =&amp;gt; 0  (0, 1) =&amp;gt; 1&lt;br /&gt;
 # (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1  (1, 0) =&amp;gt; 0  (1, 0) =&amp;gt; 1&lt;br /&gt;
 # (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 1  (1, 1) =&amp;gt; 0&lt;br /&gt;
 &lt;br /&gt;
 W1 = tf.Variable(tf.random_uniform([2, 2]))&lt;br /&gt;
 b1 = tf.Variable(tf.random_uniform([2]))&lt;br /&gt;
 &lt;br /&gt;
 W2 = tf.Variable(tf.random_uniform([2, 1]))&lt;br /&gt;
 b2 = tf.Variable(tf.random_uniform([1]))&lt;br /&gt;
 &lt;br /&gt;
 def logic_gate(x):&lt;br /&gt;
     hidden = tf.sigmoid(tf.matmul(x, W1) + b1)&lt;br /&gt;
     return tf.sigmoid(tf.matmul(hidden, W2) + b2)&lt;br /&gt;
 &lt;br /&gt;
 x = tf.placeholder(&amp;quot;float&amp;quot;, [None, 2])&lt;br /&gt;
 y = tf.placeholder(&amp;quot;float&amp;quot;, [None, 1])&lt;br /&gt;
 &lt;br /&gt;
 value = logic_gate(x)&lt;br /&gt;
 loss = tf.reduce_sum(tf.pow(y-value, 2))&lt;br /&gt;
 # loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(value), reduction_indices=1)) # Don&#039;t work&lt;br /&gt;
 optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)&lt;br /&gt;
 &lt;br /&gt;
 init = tf.initialize_all_variables()&lt;br /&gt;
 &lt;br /&gt;
 with tf.Session() as sess:&lt;br /&gt;
     sess.run(init)&lt;br /&gt;
     for i in range(30001):&lt;br /&gt;
         result = sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})&lt;br /&gt;
         if (i % 1000 == 0):&lt;br /&gt;
             print(&amp;quot;Epoch: &amp;quot;, i)&lt;br /&gt;
             print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}))&lt;br /&gt;
&lt;/div&gt;</summary>
		<author><name>59.11.128.254</name></author>
	</entry>
</feed>