ํ…์„œํ”Œ๋กœ

  • ๋”ฅ๋Ÿฌ๋‹ ํ”„๋ ˆ์ž„์›Œํฌ ํ…์„œํ”Œ๋กœ๋ฅผ ํ™œ์šฉํ•˜์—ฌ J(w)=w2โˆ’10w+25J(w)=w^2-10w+25 ๋ฅผ ์ตœ์†Œ๋กœ ๋งŒ๋“œ๋Š” ww ๋ฅผ ์ฐพ์•„๋ณด์ž
import tensorflow as tf

# Parameter to optimize, initialized at 0
w = tf.Variable(0, dtype=tf.float32)
optimizer = tf.keras.optimizers.Adam(0.1)

def train_step():
    # Record the forward computation so TensorFlow can differentiate it
    with tf.GradientTape() as tape:
        cost = w ** 2 - 10 * w + 25

    # Compute dJ/dw and take one Adam step
    trainable_variables = [w]
    grads = tape.gradient(cost, trainable_variables)
    optimizer.apply_gradients(zip(grads, trainable_variables))

for _ in range(1000):
    train_step()

print(w)

<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.000001>
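
As a quick sanity check on what GradientTape records (a minimal sketch, my addition rather than part of the original notes): the analytic derivative is dJ/dw = 2w - 10, so at the initial w = 0 the tape should report a gradient of -10.

import tensorflow as tf

w = tf.Variable(0, dtype=tf.float32)
with tf.GradientTape() as tape:
    cost = w ** 2 - 10 * w + 25

# dJ/dw = 2w - 10, so the gradient at w = 0 should be -10
print(tape.gradient(cost, w))  # tf.Tensor(-10.0, shape=(), dtype=float32)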

  • The coefficients of the cost function can also be supplied as data x, just as training data would feed a real cost function:

import numpy as np
import tensorflow as tf

w = tf.Variable(0, dtype=tf.float32)
x = np.array([1.0, -10.0, 25.0])  # coefficients of the quadratic, treated as data
optimizer = tf.keras.optimizers.Adam(0.1)

def cost():
    # J(w) = x[0] * w^2 + x[1] * w + x[2]
    return x[0] * w ** 2 + x[1] * w + x[2]

print(w)
for _ in range(1000):
    # minimize() computes the gradients of cost() and applies them in one call
    optimizer.minimize(cost, [w])
print(w)
<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>
<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.000001>
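
As a side note, the same training step is often wrapped in tf.function so TensorFlow traces it into a graph once instead of re-executing Python on every iteration. A minimal sketch of that pattern (my addition, not from the original notes), equivalent to the minimize() loop above:

import tensorflow as tf

w = tf.Variable(0, dtype=tf.float32)
x = tf.constant([1.0, -10.0, 25.0])
optimizer = tf.keras.optimizers.Adam(0.1)

@tf.function  # traces the Python function into a TensorFlow graph on the first call, then reuses it
def train_step():
    with tf.GradientTape() as tape:
        cost = x[0] * w ** 2 + x[1] * w + x[2]
    grads = tape.gradient(cost, [w])
    optimizer.apply_gradients(zip(grads, [w]))

for _ in range(1000):
    train_step()
print(w.numpy())  # approximately 5.0

optimizer.minimize(cost, [w]) is shorthand for exactly this tape-then-apply_gradients sequence, so the two versions converge to the same w.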