# 机器学习之线性回归 (Machine Learning: Linear Regression)
# Published 2018-08-10 | Category: Machine Learning
#
# Demo: generate noisy samples of y = 0.1*x + 0.3, then recover the slope W
# and intercept b with TensorFlow 1.x gradient descent.

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# --- Generate synthetic training data: y = 0.1*x + 0.3 + Gaussian noise ---
num_points = 1000
vectors_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)                      # x ~ N(0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)     # noise ~ N(0, 0.03)
    vectors_set.append([x1, y1])

x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]

# Graphic display of the raw samples.
# NOTE(review): plt.legend() has no labeled artists here, so it only emits a
# warning and draws nothing; kept to match the original article's code.
plt.plot(x_data, y_data, 'ro')
plt.legend()
plt.show()

# --- Build the linear model: y = W * x + b ---
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # slope, initialized in [-1, 1)
b = tf.Variable(tf.zeros([1]))                      # intercept, initialized to 0

y = W * x_data + b

# Mean squared error between predictions and targets.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate 0.5
train = optimizer.minimize(loss)

# FIX: tf.initialize_all_variables() is deprecated (removed after TF 1.x
# early releases); tf.global_variables_initializer() is the drop-in
# replacement with identical behavior.
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

# --- Train for 20 gradient-descent steps, plotting the fit each step ---
# (In the original article the plotting statements appear indented after the
# prints, i.e. inside the loop — assumed here; confirm against the source post.)
for step in range(20):
    sess.run(train)
    print(step, sess.run(W), sess.run(b))
    print(step, sess.run(loss))

    # Graphic display: samples plus the current fitted line.
    plt.plot(x_data, y_data, 'ro')
    plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
    plt.xlabel('x')
    plt.xlim(-2, 2)
    plt.ylim(0.1, 0.6)
    plt.ylabel('y')
    plt.legend()
    plt.show()

# 效果图 (result figures) follow in the original article.