Run the QAOA algorithm with Hybrid Jobs and PennyLane
In this section, you use what you have learned to write an actual hybrid program with PennyLane that uses parametric compilation. The algorithm script tackles a Quantum Approximate Optimization Algorithm (QAOA) problem. The program creates a cost function corresponding to a classical Max Cut optimization problem, specifies a parametrized quantum circuit, and uses a simple gradient descent method to optimize the parameters so that the cost function is minimized. In this example, the problem graph is generated inside the algorithm script for simplicity, but for more typical use cases, the best practice is to provide the problem specification through a dedicated channel in the input data configuration. The flag parametrize_differentiable defaults to True, so you automatically benefit from the improved runtime performance of parametric compilation on supported QPUs.
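For example, a problem graph supplied through an input data channel could be read inside the algorithm script as in the following sketch. The channel name input-graph and the file name graph.adjlist are illustrative assumptions; only the AMZN_BRAKET_INPUT_DIR environment variable is provided by the hybrid jobs environment.

import os

import networkx as nx


def load_problem_graph(channel="input-graph", filename="graph.adjlist"):
    # Each input data channel is mounted under AMZN_BRAKET_INPUT_DIR/<channel>.
    # The channel and file names used here are illustrative assumptions.
    input_dir = os.environ["AMZN_BRAKET_INPUT_DIR"]
    graph_path = os.path.join(input_dir, channel, filename)
    # Parse an adjacency-list file into a networkx graph with integer node labels.
    return nx.read_adjlist(graph_path, nodetype=int)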
import os
import json
import time

from braket.jobs import save_job_result
from braket.jobs.metrics import log_metric

import networkx as nx
import pennylane as qml
from pennylane import numpy as np
from matplotlib import pyplot as plt


def init_pl_device(device_arn, num_nodes, shots, max_parallel):
    return qml.device(
        "braket.aws.qubit",
        device_arn=device_arn,
        wires=num_nodes,
        shots=shots,
        # Set s3_destination_folder=None to output task results to a default folder
        s3_destination_folder=None,
        parallel=True,
        max_parallel=max_parallel,
        parametrize_differentiable=True,  # This flag is True by default.
    )


def start_here():
    input_dir = os.environ["AMZN_BRAKET_INPUT_DIR"]
    output_dir = os.environ["AMZN_BRAKET_JOB_RESULTS_DIR"]
    job_name = os.environ["AMZN_BRAKET_JOB_NAME"]
    checkpoint_dir = os.environ["AMZN_BRAKET_CHECKPOINT_DIR"]
    hp_file = os.environ["AMZN_BRAKET_HP_FILE"]
    device_arn = os.environ["AMZN_BRAKET_DEVICE_ARN"]

    # Read the hyperparameters
    with open(hp_file, "r") as f:
        hyperparams = json.load(f)

    p = int(hyperparams["p"])
    seed = int(hyperparams["seed"])
    max_parallel = int(hyperparams["max_parallel"])
    num_iterations = int(hyperparams["num_iterations"])
    stepsize = float(hyperparams["stepsize"])
    shots = int(hyperparams["shots"])

    # Generate random graph
    num_nodes = 6
    num_edges = 8
    graph_seed = 1967
    g = nx.gnm_random_graph(num_nodes, num_edges, seed=graph_seed)

    # Output figure to file
    positions = nx.spring_layout(g, seed=seed)
    nx.draw(g, with_labels=True, pos=positions, node_size=600)
    plt.savefig(f"{output_dir}/graph.png")

    # Set up the QAOA problem
    cost_h, mixer_h = qml.qaoa.maxcut(g)

    def qaoa_layer(gamma, alpha):
        qml.qaoa.cost_layer(gamma, cost_h)
        qml.qaoa.mixer_layer(alpha, mixer_h)

    def circuit(params, **kwargs):
        for i in range(num_nodes):
            qml.Hadamard(wires=i)
        qml.layer(qaoa_layer, p, params[0], params[1])

    dev = init_pl_device(device_arn, num_nodes, shots, max_parallel)

    np.random.seed(seed)
    cost_function = qml.ExpvalCost(circuit, cost_h, dev, optimize=True)
    params = 0.01 * np.random.uniform(size=[2, p])
    optimizer = qml.GradientDescentOptimizer(stepsize=stepsize)

    print("Optimization start")

    for iteration in range(num_iterations):
        t0 = time.time()

        # Evaluates the cost, then does a gradient step to new params
        params, cost_before = optimizer.step_and_cost(cost_function, params)

        # Convert cost_before to a float so it's easier to handle
        cost_before = float(cost_before)

        t1 = time.time()

        if iteration == 0:
            print("Initial cost:", cost_before)
        else:
            print(f"Cost at step {iteration}:", cost_before)

        # Log the current loss as a metric
        log_metric(
            metric_name="Cost",
            value=cost_before,
            iteration_number=iteration,
        )

        print(f"Completed iteration {iteration + 1}")
        print(f"Time to complete iteration: {t1 - t0} seconds")

    final_cost = float(cost_function(params))
    log_metric(
        metric_name="Cost",
        value=final_cost,
        iteration_number=num_iterations,
    )

    # We're done with the hybrid job, so save the result.
    # This will be returned in job.result()
    save_job_result({"params": params.numpy().tolist(), "cost": final_cost})
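Once the algorithm script is saved, you can submit it as a hybrid job from a notebook or a local Python session. The sketch below is a minimal example, assuming the script above is stored as qaoa_script.py and runs against the SV1 on-demand simulator; the file name, entry point, and hyperparameter values are assumptions you would adapt to your own setup.

from braket.aws import AwsQuantumJob

# Submit the algorithm script as a hybrid job
# (file name, entry point, and hyperparameter values are illustrative).
job = AwsQuantumJob.create(
    device="arn:aws:braket:::device/quantum-simulator/amazon/sv1",
    source_module="qaoa_script.py",
    entry_point="qaoa_script:start_here",
    hyperparameters={
        "p": "2",
        "seed": "1967",
        "max_parallel": "10",
        "num_iterations": "10",
        "stepsize": "0.1",
        "shots": "1000",
    },
    wait_until_complete=False,
)

# After the job completes, job.result() returns the dictionary that the
# script stored with save_job_result().
result = job.result()
print(result["cost"], result["params"])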
Note
Parametric compilation is supported on all superconducting, gate-based QPUs from Rigetti Computing, with the exception of pulse-level programs.