

Use Amazon Braket Hybrid Jobs to run a QAOA algorithm

In this section, you use what you have learned to write an actual hybrid program with PennyLane. The algorithm script solves a Quantum Approximate Optimization Algorithm (QAOA) problem. It builds a cost function corresponding to a classical Max Cut optimization problem, specifies a parametrized quantum circuit, and uses a simple gradient descent method to optimize the parameters so that the cost function is minimized. In this example, we generate the problem graph inside the algorithm script for simplicity, but for more typical use cases it is considered best practice to provide the problem specification through a dedicated channel in the input data configuration.

import os
import json
import time

from braket.jobs import save_job_result
from braket.jobs.metrics import log_metric

import networkx as nx
import pennylane as qml
from pennylane import numpy as np
from matplotlib import pyplot as plt


def init_pl_device(device_arn, num_nodes, shots, max_parallel):
    return qml.device(
        "braket.aws.qubit",
        device_arn=device_arn,
        wires=num_nodes,
        shots=shots,
        # Set s3_destination_folder=None to output task results to a default folder
        s3_destination_folder=None,
        parallel=True,
        max_parallel=max_parallel,
    )


def start_here():
    input_dir = os.environ["AMZN_BRAKET_INPUT_DIR"]
    output_dir = os.environ["AMZN_BRAKET_JOB_RESULTS_DIR"]
    job_name = os.environ["AMZN_BRAKET_JOB_NAME"]
    checkpoint_dir = os.environ["AMZN_BRAKET_CHECKPOINT_DIR"]
    hp_file = os.environ["AMZN_BRAKET_HP_FILE"]
    device_arn = os.environ["AMZN_BRAKET_DEVICE_ARN"]

    # Read the hyperparameters
    with open(hp_file, "r") as f:
        hyperparams = json.load(f)

    p = int(hyperparams["p"])
    seed = int(hyperparams["seed"])
    max_parallel = int(hyperparams["max_parallel"])
    num_iterations = int(hyperparams["num_iterations"])
    stepsize = float(hyperparams["stepsize"])
    shots = int(hyperparams["shots"])

    # Generate random graph
    num_nodes = 6
    num_edges = 8
    graph_seed = 1967
    g = nx.gnm_random_graph(num_nodes, num_edges, seed=graph_seed)

    # Output figure to file
    positions = nx.spring_layout(g, seed=seed)
    nx.draw(g, with_labels=True, pos=positions, node_size=600)
    plt.savefig(f"{output_dir}/graph.png")

    # Set up the QAOA problem
    cost_h, mixer_h = qml.qaoa.maxcut(g)

    def qaoa_layer(gamma, alpha):
        qml.qaoa.cost_layer(gamma, cost_h)
        qml.qaoa.mixer_layer(alpha, mixer_h)

    def circuit(params, **kwargs):
        for i in range(num_nodes):
            qml.Hadamard(wires=i)
        qml.layer(qaoa_layer, p, params[0], params[1])

    dev = init_pl_device(device_arn, num_nodes, shots, max_parallel)

    np.random.seed(seed)
    cost_function = qml.ExpvalCost(circuit, cost_h, dev, optimize=True)
    params = 0.01 * np.random.uniform(size=[2, p])
    optimizer = qml.GradientDescentOptimizer(stepsize=stepsize)

    print("Optimization start")

    for iteration in range(num_iterations):
        t0 = time.time()

        # Evaluates the cost, then does a gradient step to new params
        params, cost_before = optimizer.step_and_cost(cost_function, params)
        # Convert cost_before to a float so it's easier to handle
        cost_before = float(cost_before)
        t1 = time.time()

        if iteration == 0:
            print("Initial cost:", cost_before)
        else:
            print(f"Cost at step {iteration}:", cost_before)

        # Log the current loss as a metric
        log_metric(
            metric_name="Cost",
            value=cost_before,
            iteration_number=iteration,
        )

        print(f"Completed iteration {iteration + 1}")
        print(f"Time to complete iteration: {t1 - t0} seconds")

    final_cost = float(cost_function(params))
    log_metric(
        metric_name="Cost",
        value=final_cost,
        iteration_number=num_iterations,
    )

    # We're done with the job, so save the result.
    # This will be returned in job.result()
    save_job_result({"params": params.numpy().tolist(), "cost": final_cost})
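As noted above, a more typical setup would supply the problem graph through a dedicated input data channel rather than generating it inside the script. The following is a minimal sketch of how the algorithm script could read an edge list from the job's input directory instead; the channel name input-graph, the file name graph.json, and the JSON layout are assumptions made for illustration, not part of the original example.

import json
import os

import networkx as nx


def load_problem_graph(channel="input-graph", filename="graph.json"):
    """Build the Max Cut problem graph from the job's input data.

    Assumes (hypothetically) that the job was created with an input data
    channel named `channel` containing a JSON file of the form
    {"edges": [[0, 1], [1, 2], ...]}.
    """
    input_dir = os.environ["AMZN_BRAKET_INPUT_DIR"]
    with open(os.path.join(input_dir, channel, filename), "r") as f:
        spec = json.load(f)

    g = nx.Graph()
    g.add_edges_from(spec["edges"])
    return g

On the job-creation side, the same channel would be supplied through the input_data argument of AwsQuantumJob.create, for example input_data={"input-graph": "s3://your-bucket/graph.json"} (the bucket and key here are placeholders).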

The job script is fairly similar to the earlier scripts, except that it also tracks and prints the metrics and logs produced by the algorithm script and downloads the results to the local directory.

import boto3
import time

from braket.aws import AwsQuantumJob, AwsSession
from braket.jobs.image_uris import Framework, retrieve_image
from braket.jobs.metrics_data.definitions import MetricType

device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"

hyperparameters = {
    # Number of tasks per iteration = 2 * (num_nodes + num_edges) * p + 1
    "p": "2",
    "seed": "1967",
    # Maximum number of simultaneous tasks allowed
    "max_parallel": "10",
    # Number of total optimization iterations, including those from previous checkpoint (if any)
    "num_iterations": "5",
    # Step size / learning rate for gradient descent
    "stepsize": "0.1",
    # Shots for each circuit execution
    "shots": "1000",
}

# Use either the TensorFlow or PyTorch container for PennyLane
region = AwsSession().region
image_uri = retrieve_image(Framework.PL_TENSORFLOW, region)
# image_uri = retrieve_image(Framework.PL_PYTORCH, region)

start_time = time.time()

job = AwsQuantumJob.create(
    image_uri=image_uri,
    entry_point="qaoa_source.algorithm_script:start_here",
    device=device_arn,
    source_module="qaoa_source",
    hyperparameters=hyperparameters,
)

print(job.arn)

while job.state() not in AwsQuantumJob.TERMINAL_STATES:
    print(job.state())
    time.sleep(10)

end_time = time.time()
print(job.state())
print(end_time - start_time)

print(job.metadata())
print(job.result())

# Metrics may not show up immediately, so wait for 120 seconds
time.sleep(120)
print(job.metrics())

# Print out logs from CloudWatch
print(job.logs())

# Download outputs to local directory
job.download_result()
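Because the algorithm script logs the cost at each iteration with log_metric, the values returned by job.metrics() can also be plotted once the job has completed. The sketch below assumes that job.metrics() returns a dictionary of parallel lists keyed by column name, including "iteration_number" and "Cost"; as noted above, the metrics may take a couple of minutes to appear after the job finishes.

import matplotlib.pyplot as plt

# Fetch the logged metrics; assumed to include the columns
# "iteration_number" and "Cost" as parallel lists
metrics = job.metrics()

# Sort the data points by iteration number before plotting
points = sorted(zip(metrics["iteration_number"], metrics["Cost"]))
iterations, costs = zip(*points)

plt.plot(iterations, costs, marker="o")
plt.xlabel("Iteration")
plt.ylabel("Cost")
plt.title("QAOA Max Cut cost during optimization")
plt.savefig("qaoa_cost.png")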