Reinforcement Learning Study Notes (3): Sarsa

1. Difference between Q-learning and Sarsa

(1) Q-learning is off-policy: its update target bootstraps from the greedy (max) value of the next state, regardless of which action the agent actually takes next.

(2) Sarsa is on-policy: its update target bootstraps from the value of the action the agent actually chooses in the next state, so the learned values reflect the behavior policy, including its exploration (see the sketch below).
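The difference shows up directly in the TD target. A minimal sketch of the two targets, written against the same pandas q_table, lr and gamma attributes used in the code below (not part of the original program):

def q_learning_target(self, r, s_):
    # off-policy: use the maximum Q value over all actions in the next state
    return r + self.gamma * self.q_table.loc[s_, :].max()

def sarsa_target(self, r, s_, a_):
    # on-policy: use the Q value of the action that will actually be taken next
    return r + self.gamma * self.q_table.loc[s_, a_]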

 

2. Code

The learning step (the learn() method) differs:

class SarsaTable(RL):

    def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
        super(SarsaTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)

    def learn(self, s, a, r, s_, a_):
        self.check_state_exist(s_)
        q_predict = self.q_table.loc[s, a]
        if s_ != 'terminal':
            q_target = r + self.gamma * self.q_table.loc[s_, a_]  # next state is not terminal
        else:
            q_target = r  # next state is terminal
        self.q_table.loc[s, a] += self.lr * (q_target - q_predict)  # update
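
The class above inherits check_state_exist, choose_action, q_table, lr and gamma from a shared RL base class that is not shown here. A minimal sketch of such a base class, assuming a pandas-backed Q table and epsilon-greedy action selection (the original implementation may differ in details):

import numpy as np
import pandas as pd

class RL(object):
    def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
        self.actions = actions          # list of action indices
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon = e_greedy
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)

    def check_state_exist(self, state):
        # add a row of zeros for a state seen for the first time
        if state not in self.q_table.index:
            self.q_table.loc[state] = [0] * len(self.actions)

    def choose_action(self, observation):
        self.check_state_exist(observation)
        if np.random.uniform() < self.epsilon:
            # exploit: pick the best known action, breaking ties randomly
            state_action = self.q_table.loc[observation, :]
            action = np.random.choice(state_action[state_action == state_action.max()].index)
        else:
            # explore: pick a random action
            action = np.random.choice(self.actions)
        return action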

The training loop also differs: Sarsa chooses the next action before calling learn(), then carries that action into the next step:

def update():
    for episode in range(100):
        # initial observation
        observation = env.reset()
        # action
        action = RL.choose_action(str(observation))

        while True:
            # fresh env
            env.render()

            # take the action: get next observation, reward, and done flag
            observation_, reward, done = env.step(action)
            # choose the next action from the next observation (on-policy)
            action_ = RL.choose_action(str(observation_))
            # Sarsa update uses (s, a, r, s_, a_)
            RL.learn(str(observation), action, reward, str(observation_), action_)
            # update observation and action
            observation = observation_
            action = action_

            # break while loop when end of this episode
            if done:
                break

    # end of game
    print('game over')
    env.destroy()

if __name__ == "__main__":
    env = Maze()
    RL = SarsaTable(actions=list(range(env.n_actions)))

    env.after(100, update)
    env.mainloop()
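
For comparison, a Q-learning training loop would look like the minimal sketch below (not part of the program above; it assumes a QLearningTable agent whose learn(s, a, r, s_) bootstraps from the max Q value of the next state). The action is chosen inside the loop and no next action needs to be passed to learn():

def update_q_learning():
    for episode in range(100):
        observation = env.reset()
        while True:
            env.render()
            # action is chosen inside the loop, from the current observation
            action = RL.choose_action(str(observation))
            observation_, reward, done = env.step(action)
            # no next action is needed: the target uses the max over Q(s_, .)
            RL.learn(str(observation), action, reward, str(observation_))
            observation = observation_
            if done:
                break
    print('game over')
    env.destroy()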

 
