Azure Event Grid Arduino Client – The joy of certs

“Let’s start at the very beginning, a very good place to start”

The Azure Event Grid MQTT broker server X509 certificate chain can be copied ‘n’ pasted from the output of the openssl command

openssl s_client -connect YourNamespace.newzealandnorth-1.ts.eventgrid.azure.net:8883 -showcerts

A self-signed X509 root certificate, which can sign intermediate X509 certificates, and its key file can be generated with a single openssl command.

openssl req -x509 -newkey rsa:4096 -keyout rootCA.key -out rootCA.crt -days 3650 -nodes -subj "/CN=devMobile/O=devMobile.co.nz/C=NZ" -addext "basicConstraints=critical,CA:TRUE" -addext "keyUsage=critical,keyCertSign"

For a non-trivial system there should be a number of intermediate certificates. I have tried creating intermediate certificates for a device type, geography, application, customer and combinations of these. The first couple of times I got it wrong, so start with a field trial so that it isn’t so painful to go back and fix (beware the sunk cost fallacy).

openssl genrsa -out intermediate.key 4096

openssl req -new -key intermediate.key -out intermediate.csr -subj "/CN=intermediate/O=devMobile.co.nz/C=NZ"

I found creating an intermediate certificate that could sign device certificates required a conf file for the basicConstraints and keyUsage configuration.

# intermediate_ext.cnf
[ v3_intermediate_ca ]
basicConstraints = critical, CA:TRUE, pathlen:0
keyUsage = critical, keyCertSign

  • critical – The extension must be understood and processed by any application validating the certificate. If the application does not understand it, the certificate must be rejected.
  • CA:TRUE – This certificate is allowed to act as a Certificate Authority (CA), meaning it can sign other certificates.
  • pathlen:0 – This CA can only issue end-entity (leaf) certificates and cannot issue further intermediate CA certificates.
  • keyCertSign – The certificate can be used to sign other certificates (i.e., it’s a CA certificate).

openssl x509 -req -in intermediate.csr  -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out intermediate.crt -days 1825 -extfile intermediate_ext.cnf -extensions v3_intermediate_ca

Creating a device certificate is similar to the process for the intermediate certificate but doesn’t need to be able to sign certificates.

openssl genrsa -out EdgeBox100A.key 4096

openssl req -new -key EdgeBox100A.key -out EdgeBox100A.csr -subj "/CN=EdgeBox100A"

openssl x509 -req -in EdgeBox100A.csr -CA intermediate.crt -CAkey intermediate.key -CAcreateserial -out EdgeBox100A.crt -days 365 
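
The chain can be sanity-checked on the desktop before the certificates are loaded onto a device. A minimal C# sketch (my addition, assuming .NET 5 or later and that the rootCA.crt, intermediate.crt and EdgeBox100A.crt files generated above are in the working directory):

using System;
using System.Security.Cryptography.X509Certificates;

class ChainCheck
{
   static void Main()
   {
      using var root = new X509Certificate2("rootCA.crt");
      using var intermediate = new X509Certificate2("intermediate.crt");
      using var device = new X509Certificate2("EdgeBox100A.crt");

      using var chain = new X509Chain();
      // Trust only the self-signed root, not the machine certificate store
      chain.ChainPolicy.TrustMode = X509ChainTrustMode.CustomRootTrust;
      chain.ChainPolicy.CustomTrustStore.Add(root);
      chain.ChainPolicy.ExtraStore.Add(intermediate);
      // A lab CA has no CRL/OCSP endpoints so skip revocation checking
      chain.ChainPolicy.RevocationMode = X509RevocationMode.NoCheck;

      if (chain.Build(device))
      {
         Console.WriteLine("Device certificate chains to rootCA");
      }
      else
      {
         foreach (var status in chain.ChainStatus)
         {
            Console.WriteLine($"{status.Status}: {status.StatusInformation}");
         }
      }
   }
}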

For production systems it is worth putting some thought into the Common Name (CN), Organizational Unit (OU), Organization (O), Locality (L), State or Province (ST) and Country (C) fields of the subject names.

// Minimalist ESP32 + Event Grid MQTT (mTLS) with PubSubClient
// Copyright (c) November 2025, devMobile Software
#include <PubSubClient.h>
#include <WiFi.h>
#include <WiFiClientSecure.h>

#include "constants.h"
#include "secrets.h"

// --- Wi-Fi ---
//const char* WIFI_SSID     = "";
//const char* WIFI_PASSWORD = "";

// --- Event Grid MQTT ---
//const char* MQTT_SERVER = "";
const uint16_t MQTT_PORT = 8883;

//const char* MQTT_CLIENTID = "";
//const char* MQTT_USERNAME = "";
//const char* MQTT_PASSWORD = "";
//const char* MQTT_TOPIC_PUBLISH = "devices/";
//const char* MQTT_TOPIC_SUBSCRIBE = "devices/";

/*
// The certificate that is used to authenticate the MQTT Broker
const char CA_ROOT_PEM[] PROGMEM = R"PEM(
-----BEGIN CERTIFICATE-----
      Thumbprint: 56D955C849887874AA1767810366D90ADF6C8536
      CN: CN=Microsoft Azure ECC TLS Issuing CA 03
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
      Thumbprint: 7E04DE896A3E666D00E687D33FFAD93BE83D349E
      CN: CN=DigiCert Global Root G3
-----END CERTIFICATE-----
)PEM";

The certificate that is used to authenticate the device
static const char CLIENT_CERT_PEM[] PROGMEM = R"PEM(
-----BEGIN CERTIFICATE-----
 CN=Self signed device certificate
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
 CN=Self signed Intermediate certificate
-----END CERTIFICATE-----
)PEM";

 The PEM encoded private key of device
static const char CLIENT_KEY_PEM[] PROGMEM = R"PEM(
-----BEGIN PRIVATE KEY-----
-----END PRIVATE KEY-----
)PEM";
*/

WiFiClientSecure secureClient;
PubSubClient mqttClient(secureClient);

void setup() {
  Serial.begin(9600);
  delay(5000);
  Serial.println();

  // Connect to WiFi
  Serial.println("WiFi connecting");
  WiFi.begin(WIFI_SSID, WIFI_PASSWORD);
  Serial.print("*");
  while (WiFi.status() != WL_CONNECTED) {
    delay(500);
    Serial.print("*");
  }
  Serial.println("\nWiFi connected");

  // Sync time for TLS
  Serial.println("\nTime synchronising");
  configTime(0, 0, "pool.ntp.org", "time.nist.gov");
  Serial.print("*");
  while (time(nullptr) < 100000) {
    delay(500);
    Serial.print("*");
  }
  Serial.println("\nTime synchronised");

  Serial.println("\nValidating ServerFQDN-Certificate combination");
  secureClient.setCACert(CA_ROOT_PEM);

  Serial.println("TCP connecting");
  if (secureClient.connect(MQTT_SERVER, MQTT_PORT)) {
    Serial.println("\nTCP connected");
  } else {
    Serial.println("\nTCP connection failed");
    return;
  }

  secureClient.setCertificate(CLIENT_CERT_PEM);
  secureClient.setPrivateKey(CLIENT_KEY_PEM);

  mqttClient.setServer(MQTT_SERVER, MQTT_PORT);

  Serial.println("\nMQTT connecting");
  Serial.print("*");
  while (!mqttClient.connect(MQTT_CLIENTID, MQTT_USERNAME, MQTT_PASSWORD)) {
    Serial.println(mqttClient.state());
    delay(5000);
    Serial.print("*");
  }
  Serial.println("\nMQTT connected");
}

static uint32_t sequenceNumber = 0;

void loop() {
  mqttClient.loop();

  Serial.print("'.");
  delay(10000);
}

My Arduino Xiao ESP32S3 and EdgeBox-ESP-100 Industrial Edge Controller devices could connect to the local Wi-Fi, get the time and date using the Network Time Protocol (NTP), and validate the Azure Event Grid MQTT broker certificate. They could then connect to the Azure Event Grid MQTT broker with the client name specified in the subject name of their X509 certificates.

Establishing a connection to the Azure Event Grid MQTT broker often failed, which surprised me. Initially I didn’t have any retry logic, which meant I wasted quite a bit of time trying to debug failed connections.

.NET 10 OpenAPI and SwashBuckle NuGets

Yesterday I downloaded Microsoft Visual Studio 2026 and started updating the projects I am working on to .NET 10 and updating any NuGets.

I left these three NuGets until last as I have had problems updating them before, and this time was no different. The updated NuGets “broke” my code because the way that security definitions and security requirements were implemented had changed.

These articles were the inspiration for my approach

   options.AddSecurityDefinition("X-API-Key", new OpenApiSecurityScheme
   {
      Description = "JWT Authorization header using the Bearer scheme. Example: \"Authorization: Bearer {token}\"",
      Name = "Authorization",
      In = ParameterLocation.Header,
      Type = SecuritySchemeType.Http,
      Scheme = "Bearer"
   });

   options.AddSecurityRequirement(document => new OpenApiSecurityRequirement
   {
      [new OpenApiSecuritySchemeReference("Bearer", document)] = [],
      [new OpenApiSecuritySchemeReference("X-API-Key", document)] = []
   });
});

Warning: make sure the scheme names etc. have the same case so you don’t lose an hour of your life that you will never get back.

With the above updates the application would work but….

WithOpenApi was originally designed for minimal APIs to attach an OpenApiOperation to endpoint metadata so tools like Swashbuckle could consume it.

Deprecation of WithOpenApi extension method

However, starting with .NET 9, ASP.NET Core introduced native OpenAPI document generation via Microsoft.AspNetCore.OpenApi. This made WithOpenApi unnecessary because the new pipeline already supports operation customization through transformers.

app.MapGet("Version", () =>
{
   return Results.Ok(typeof(Program).Assembly.GetName().Version?.ToString());
}).RequireAuthorization()
         .WithName("Version")
         .Produces<string>(StatusCodes.Status200OK)
         .Produces(StatusCodes.Status401Unauthorized)
         .AddOpenApiOperationTransformer((operation, context, ct) =>
         {
            // Per-endpoint tweaks
            operation.Summary = "Returns version of the application";
            operation.Description = "Returns the version of the application from project metadata.";
            return Task.CompletedTask;
         });

The new transformer API (AddOpenApiOperationTransformer) works directly with the built-in OpenAPI pipeline. It allows per-operation or global modifications without relying on third-party libraries.
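
The same mechanism also works globally. A minimal sketch (my example, not from the articles above) that adds a default 500 response to every operation when the OpenAPI services are registered:

builder.Services.AddOpenApi(options =>
{
   // Applied to every operation in the generated document
   options.AddOperationTransformer((operation, context, cancellationToken) =>
   {
      operation.Responses ??= new OpenApiResponses();
      operation.Responses.TryAdd("500", new OpenApiResponse { Description = "Server error" });

      return Task.CompletedTask;
   });
});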

ONNXRuntime.AI-Faster R-CNN C# Sample differences

After building Faster R-CNN object detection applications with Copilot and Github Copilot, the results were slightly different when compared with the onnxruntime.ai Object detection with Faster RCNN Deep Learning in C# sample (which hasn’t been updated for years).

The sample image was 640×480 pixels.

The FasterRCNNObjectDetectionApplicationGitHubCopilot application’s scaled image was initially 1056×800 and then 1088×800 pixels.

In the initial version the dimensions were “rounded down” to the next multiple of 32 (assuming minSize = 800 and maxSize = 1333, a 640×480 image scales by min(1333/640, 800/480) ≈ 1.667 to 1066×800, which is then rounded down to 1056×800).

// Calculate scale factor to fit within the range while maintaining aspect ratio
float scale = Math.Min((float)maxSize / Math.Max(originalWidth, originalHeight),
                                (float)minSize / Math.Min(originalWidth, originalHeight));

// Calculate new dimensions
int newWidth = (int)(originalWidth * scale);
int newHeight = (int)(originalHeight * scale);

// Ensure dimensions are divisible by 32
const int divisor = 32; // declaration added here; the value is assumed from the rounding-up version's use of 32f
newWidth = (newWidth / divisor) * divisor;
newHeight = (newHeight / divisor) * divisor;

Scaled 1056×800

Then for the second version the dimensions were “rounded up” to the next multiple of 32

// Calculate scale factor to fit within the range while maintaining aspect ratio
float scale = Math.Min((float)maxSize / Math.Max(originalWidth, originalHeight),
                                (float)minSize / Math.Min(originalWidth, originalHeight));

// Calculate new dimensions
int newWidth = (int)(originalWidth * scale);
int newHeight = (int)(originalHeight * scale);

// Ensure dimensions are divisible by 32
newWidth = (int)(Math.Ceiling(newWidth / 32f) * 32f);
newHeight = (int)(Math.Ceiling(newHeight / 32f) * 32f);

Scaled 1088×800
Marked up 1088×800

The FasterRCNNObjectDetectionApplicationOriginal application scaled the input image to 1066×800.

Scaled image 1066×800

The FasterRCNNObjectDetectionApplicationOriginal application pillar boxed/padded the image to 1088×800 as the DenseTensor was loaded.

using Image<Rgb24> image = Image.Load<Rgb24>(imageFilePath);

Console.WriteLine($"Before x:{image.Width} y:{image.Height}");

// Resize image
float ratio = 800f / Math.Min(image.Width, image.Height);
image.Mutate(x => x.Resize((int)(ratio * image.Width), (int)(ratio * image.Height)));

Console.WriteLine($"After x:{image.Width} y:{image.Height}");

// Preprocess image
var paddedHeight = (int)(Math.Ceiling(image.Height / 32f) * 32f);
var paddedWidth = (int)(Math.Ceiling(image.Width / 32f) * 32f);

Console.WriteLine($"Padded x:{paddedWidth} y:{paddedHeight}");

Tensor<float> input = new DenseTensor<float>(new[] { 3, paddedHeight, paddedWidth });
var mean = new[] { 102.9801f, 115.9465f, 122.7717f };
image.ProcessPixelRows(accessor =>
{
   for (int y = paddedHeight - accessor.Height; y < accessor.Height; y++)
   {
      Span<Rgb24> pixelSpan = accessor.GetRowSpan(y);
      for (int x = paddedWidth - accessor.Width; x < accessor.Width; x++)
      {
         input[0, y, x] = pixelSpan[x].B - mean[0];
         input[1, y, x] = pixelSpan[x].G - mean[1];
         input[2, y, x] = pixelSpan[x].R - mean[2];
      }
   }
});

Marked up image 1066×800

I think the three different implementations of the preprocessing steps and the graphics libraries used probably caused the differences in the results. The way an image is “resized” by System.Drawing.Common vs. ImageSharp (resampled, cropped and centered, or padded and pillar boxed) could make a significant difference to the results.

ONNXRuntime.AI-Faster R-CNN C# Sample oddness

After building Faster R-CNN object detection applications with Copilot and Github Copilot, the results didn’t look too bad when compared with Ultralytics Yolo (with YoloSharp).

The input image sports.jpg was 1200×798 pixels.

The FasterRCNNObjectDetectionApplicationCopilot application only generated labels, confidences and minimum bounding box coordinates.

For the FasterRCNNObjectDetectionApplicationGitHubCopilot application the marked-up image was 1200×798 pixels.

The YoloSharpObjectDetectionApplication application’s marked-up image was 1200×798 pixels.

I went back to the onnxruntime.ai Object detection with Faster RCNN Deep Learning in C# sample source code to check my implementations and the highlighted area on the left caught my attention.

The FasterRCNNObjectDetectionApplicationOriginal application marked-up image was 1023×800.

I downloaded the sample code which hadn’t been updated for years.

public static void Main(string[] args)
{
   Console.WriteLine("FasterRCNNObjectDetectionApplicationOriginal");

   // Read paths
   string modelFilePath = args[0];
   string imageFilePath = args[1];
   string outImageFilePath = args[2];

   // Read image
   using Image<Rgb24> image = Image.Load<Rgb24>(imageFilePath);

   // Resize image
   float ratio = 800f / Math.Min(image.Width, image.Height);
   image.Mutate(x => x.Resize((int)(ratio * image.Width), (int)(ratio * image.Height)));

   // Preprocess image
   var paddedHeight = (int)(Math.Ceiling(image.Height / 32f) * 32f);
   var paddedWidth = (int)(Math.Ceiling(image.Width / 32f) * 32f);
   Tensor<float> input = new DenseTensor<float>(new[] { 3, paddedHeight, paddedWidth });
   var mean = new[] { 102.9801f, 115.9465f, 122.7717f };
   image.ProcessPixelRows(accessor =>
   {
      for (int y = paddedHeight - accessor.Height; y < accessor.Height; y++)
      {
         Span<Rgb24> pixelSpan = accessor.GetRowSpan(y);
         for (int x = paddedWidth - accessor.Width; x < accessor.Width; x++)
         {
            input[0, y, x] = pixelSpan[x].B - mean[0];
            input[1, y, x] = pixelSpan[x].G - mean[1];
            input[2, y, x] = pixelSpan[x].R - mean[2];
         }
      }
   });

   // Setup inputs and outputs
   var inputs = new List<NamedOnnxValue>
      {
            NamedOnnxValue.CreateFromTensor("image", input)
      };

   // Run inference
   using var session = new InferenceSession(modelFilePath);
   using IDisposableReadOnlyCollection<DisposableNamedOnnxValue> results = session.Run(inputs);

   // Postprocess to get predictions
   var resultsArray = results.ToArray();
   float[] boxes = resultsArray[0].AsEnumerable<float>().ToArray();
   long[] labels = resultsArray[1].AsEnumerable<long>().ToArray();
   float[] confidences = resultsArray[2].AsEnumerable<float>().ToArray();
   var predictions = new List<Prediction>();
   var minConfidence = 0.7f;
   for (int i = 0; i < boxes.Length - 4; i += 4)
   {
      var index = i / 4;
      if (confidences[index] >= minConfidence)
      {
         predictions.Add(new Prediction
         {
            Box = new Box(boxes[i], boxes[i + 1], boxes[i + 2], boxes[i + 3]),
            Label = LabelMap.Labels[labels[index]],
            Confidence = confidences[index]
         });
      }
   }

   // Put boxes, labels and confidence on image and save for viewing
   using var outputImage = File.OpenWrite(outImageFilePath);
   Font font = SystemFonts.CreateFont("Arial", 16);
   foreach (var p in predictions)
   {
      Console.WriteLine($"Label: {p.Label}, Confidence: {p.Confidence}, Bounding Box:[{p.Box.Xmin}, {p.Box.Ymin}, {p.Box.Xmax}, {p.Box.Ymax}]");
      image.Mutate(x =>
      {
         x.DrawLine(Color.Red, 2f, new PointF[] {

                  new PointF(p.Box.Xmin, p.Box.Ymin),
                  new PointF(p.Box.Xmax, p.Box.Ymin),

                  new PointF(p.Box.Xmax, p.Box.Ymin),
                  new PointF(p.Box.Xmax, p.Box.Ymax),

                  new PointF(p.Box.Xmax, p.Box.Ymax),
                  new PointF(p.Box.Xmin, p.Box.Ymax),

                  new PointF(p.Box.Xmin, p.Box.Ymax),
                  new PointF(p.Box.Xmin, p.Box.Ymin)
               });
         x.DrawText($"{p.Label}, {p.Confidence:0.00}", font, Color.White, new PointF(p.Box.Xmin, p.Box.Ymin));
      });
   }
   image.SaveAsJpeg(outputImage);

   Console.WriteLine("Press Enter to exit");
   Console.ReadLine();
}

I then compared the output of the object detection applications and the onnxruntime.ai Object detection with Faster RCNN Deep Learning in C# sample was different.

After some investigation, I think the scaling of the image used for inferencing (based on the requirements of the model), and then the scaling of the minimum bounding rectangles back onto the displayed image, isn’t quite right.
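
A minimal sketch of what I suspect the fix looks like (my assumption, using hypothetical originalWidth and originalHeight variables holding the pre-resize dimensions): divide the predicted box coordinates by the same ratio used for the resize before drawing them on the original image.

// Boxes are predicted in resized-image coordinates, so dividing by the
// resize ratio maps them back onto the original image
float ratio = 800f / Math.Min(originalWidth, originalHeight);

foreach (var p in predictions)
{
   var box = new Box(p.Box.Xmin / ratio, p.Box.Ymin / ratio,
                     p.Box.Xmax / ratio, p.Box.Ymax / ratio);

   // Draw box on the original (unresized) image instead of the resized one
}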

Building Edge AI with Github Copilot - Security Camera HTTP YoloSharp

When I started with the Security Camera HTTP code and added code to process the images with an Ultralytics Object Detection model, I found the order of the prompts could make a difference. My first attempt at adding YoloSharp to the SecurityCameraHttpClient application with Github Copilot didn’t go well and needed some “human intervention”. When I thought more about the order of the prompts, adding the same functionality went a lot better.

// Use a stream rather than loading image from a file
// Use YoloSharp to run an onnx Object Detection model on the image
// Make the YoloPredictor a class variable
// Save image if object with specified image class name detected
// Modify so objectDetected supports multiple image class names
// Modify code to make use of GPU configurable
// Make display of detections configurable in app settings
// Make saving of image configurable in app settings

internal class Program
{
   private static HttpClient _client;
   private static bool _isRetrievingImage = false;
   private static ApplicationSettings _applicationSettings;
   private static YoloPredictor _yoloPredictor;

   static void Main(string[] args)
   {
      Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss} SecurityCameraClient starting");
#if RELEASE
         Console.WriteLine("RELEASE");
#else
         Console.WriteLine("DEBUG");
#endif

      var configuration = new ConfigurationBuilder()
            .AddJsonFile("appsettings.json", false, true)
            .AddUserSecrets<Program>()
            .Build();

      _applicationSettings = configuration.GetSection("ApplicationSettings").Get<ApplicationSettings>();

      // Initialize YoloPredictor with GPU configuration
      _yoloPredictor = new YoloPredictor(_applicationSettings.OnnxModelPath, new YoloPredictorOptions()
      {
         UseCuda = _applicationSettings.UseCuda, // Configurable GPU usage
      });

      using (HttpClientHandler handler = new HttpClientHandler { Credentials = new NetworkCredential(_applicationSettings.Username, _applicationSettings.Password) })
      using (_client = new HttpClient(handler))
      using (var timer = new Timer(async _ => await RetrieveImageAsync(), null, _applicationSettings.TimerDue, _applicationSettings.TimerPeriod))
      {
         Console.WriteLine("Press any key to exit...");
         Console.ReadKey();
      }
   }

      private static async Task RetrieveImageAsync()
      {
         if (_isRetrievingImage) return;

         _isRetrievingImage = true;
         try
         {
            Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss.fff} SecurityCameraClient download starting");

            HttpResponseMessage response = await _client.GetAsync(_applicationSettings.CameraUrl);
            response.EnsureSuccessStatusCode();

            using (Stream imageStream = await response.Content.ReadAsStreamAsync())
            {
               var detections = _yoloPredictor.Detect(imageStream);
               bool objectDetected = false;

               foreach (var detection in detections)
               {
                  if (_applicationSettings.LogDetections) // Check if logging detections is enabled
                  {
                     Console.WriteLine($"Detected {detection.Name.Name} with confidence {detection.Confidence}");
                  }

                  if (_applicationSettings.ClassNames.Contains(detection.Name.Name))
                  {
                     objectDetected = true;
                  }
               }

               if (objectDetected && _applicationSettings.SaveImage) // Check if saving images is enabled
               {
                  string savePath = string.Format(_applicationSettings.SavePath, DateTime.UtcNow);
                  using (FileStream fileStream = new FileStream(savePath, FileMode.Create, FileAccess.Write, FileShare.None))
                  {
                     imageStream.Position = 0;
                     await imageStream.CopyToAsync(fileStream);
                  }
               }
            }

            Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss.fff} SecurityCameraClient download done");
         }
         catch (Exception ex)
         {
            Console.WriteLine($"An error occurred: {ex.Message}");
         }
         finally
         {
            _isRetrievingImage = false;
         }
      }
}

public class ApplicationSettings
{
   public string CameraUrl { get; set; } = "";
   public string SavePath { get; set; } = "";
   public string Username { get; set; } = "";
   public string Password { get; set; } = "";
   public TimeSpan TimerDue { get; set; } = TimeSpan.Zero;
   public TimeSpan TimerPeriod { get; set; } = TimeSpan.Zero;
   public string OnnxModelPath { get; set; } = "";
   public bool UseCuda { get; set; } = false; // Configurable GPU usage
   public List<string> ClassNames { get; set; } = new List<string>();
   public bool LogDetections { get; set; } = false; // Configurable logging of detections
   public bool SaveImage { get; set; } = false; // Configurable saving of images
}
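
For reference, a matching appsettings.json would look something like this (my illustration with placeholder values; in my setup the CameraUrl, Username and Password came from user secrets rather than this file):

{
  "ApplicationSettings": {
    "CameraUrl": "http://your-security-camera-url/image.jpg",
    "SavePath": "Images\\{0:yyMMddHHmmss}.jpg",
    "TimerDue": "00:00:05",
    "TimerPeriod": "00:00:30",
    "OnnxModelPath": "yolov8s.onnx",
    "UseCuda": false,
    "ClassNames": [ "person" ],
    "LogDetections": true,
    "SaveImage": true
  }
}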

The interaction between Visual Studio IntelliSense and the GitHub Copilot prompts was interesting.

I wonder if this is because Visual Studio IntelliSense has local context, whereas Github Copilot has “cloud” context.

It took a couple of failed attempts to find the best order, which I think would reduce over time.

The Copilot generated code in this post is not suitable for production

RTSP Camera rosenbjerg.FFMpegCore GDI Error

While working on my SecurityCameraRTSPClientFFMpegCore project I noticed that every so often, after opening the Real Time Streaming Protocol (RTSP) connection to my HiLook IPC-T250H security camera, there was a “Parameter is not valid” or “A generic error occurred in GDI+.” exception, and sometimes the image was corrupted.

My test harness code was “inspired” by the Continuous Snapshots on Live Stream #280 sample.

using (var ms = new MemoryStream())
{
    await FFMpegArguments
        .FromUrlInput(new Uri("udp://192.168.2.12:9000"))
        .OutputToPipe(new StreamPipeSink(ms), options => options
            .ForceFormat("rawvideo")
            .WithVideoCodec(VideoCodec.Png)
            .Resize(new Size(Config.JpgWidthLarge, Config.JpgHeightLarge))
            .WithCustomArgument("-vf fps=1 -update 1")
        )
        .NotifyOnProgress(o => 
        {
            try
            {
                if (ms.Length > 0)
                {
                    ms.Position = 0;
                    using (var bitmap = new Bitmap(ms))
                    {
                        // Modify bitmap here

                        // Save the bitmap
                        bitmap.Save("test.png");
                    }

                    ms.SetLength(0);
                }
            }
            catch { }
        })
        .ProcessAsynchronously();
}

My implementation is slightly different because I caught and then displayed any exceptions generated while converting the image stream to a bitmap or saving it.

using (var ms = new MemoryStream())
{
   await FFMpegArguments
         .FromUrlInput(new Uri(_applicationSettings.CameraUrl))
         .OutputToPipe(new StreamPipeSink(ms), options => options
         .ForceFormat("mpeg1video")
         //.ForceFormat("rawvideo")
         .WithCustomArgument("-rtsp_transport tcp")
         .WithFramerate(10)
         .WithVideoCodec(VideoCodec.Png)
         //.Resize(1024, 1024)
         //.ForceFormat("image2pipe")
         //.Resize(new Size(Config.JpgWidthLarge, Config.JpgHeightLarge))
         //.Resize(new Size(Config.JpgWidthLarge, Config.JpgHeightLarge))
         //.WithCustomArgument("-vf fps=1 -update 1")
         //.WithCustomArgument("-vf fps=5 -update 1")
         //.WithSpeedPreset( Speed.)
         //.UsingMultithreading()
         //.UsingThreads()
         //.WithVideoFilters(filter => filter.Scale(640, 480))
         //.UsingShortest()
         //.WithFastStart()
         )
         .NotifyOnProgress(o =>
         {
            try
            {
               if (ms.Length > 0)
               {
                  ms.Position = 0;

                  string outputPath = Path.Combine(_applicationSettings.SavePath, string.Format(_applicationSettings.FrameFileNameFormat, DateTime.UtcNow ));

                  using (var bitmap = new Bitmap(ms))
                  {
                     // Save the bitmap
                     bitmap.Save(outputPath);
                  }

                  ms.SetLength(0);
               }
            }
            catch (Exception ex)
            {
               Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss.fff} {ex.Message}");
            }
         })
         .ProcessAsynchronously();
}

I have created an issue, Continuous Snapshots on Live Stream Memory stream contains invalid bitmap image #562, to track it.

One odd thing that I noticed when scrolling “back and forth” through the images around when there was an exception was that the date and time at the top left of the image was broken.

I wonder if the image was “broken” in some subtle way and FFMpegCore is handling this differently to the other libraries I’m trialing.
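
If the root cause is a partially written frame, one possible mitigation (a sketch I haven’t verified, assuming PNG output) would be to only decode the MemoryStream when it ends with the PNG IEND chunk, since NotifyOnProgress can fire while ffmpeg is still writing:

// The last 8 bytes of a well-formed PNG are the IEND chunk type and its CRC
private static readonly byte[] PngEnd = { 0x49, 0x45, 0x4E, 0x44, 0xAE, 0x42, 0x60, 0x82 };

private static bool LooksLikeCompletePng(MemoryStream ms)
{
   if (ms.Length < PngEnd.Length)
   {
      return false;
   }

   byte[] buffer = ms.GetBuffer();
   int offset = (int)ms.Length - PngEnd.Length;

   for (int i = 0; i < PngEnd.Length; i++)
   {
      if (buffer[offset + i] != PngEnd[i])
      {
         return false;
      }
   }

   return true;
}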

RTSP Camera RabbitOM.Streaming

The RTSPCameraNagerVideoStream library had significant latency, which wasn’t good as I wanted to trigger the processing of images from the Real Time Streaming Protocol (RTSP) stream on my Seeedstudio J3011 Industrial device by strobing one of the digital inputs, and combine streamed images with timestamped static ones.

HiLook IPCT250H Camera configuration

To get a Moving Picture Experts Group (MPEG) stream I had to change the camera channel rather than use the H.264+ video encoding.

"RtspCameraUrl": "rtsp://10.0.0.19/ISAPI/Streaming/channels/102"

The KSAH-42.RabbitOM library looked worth testing so I built a test harness inspired by RabbitOM.Streaming.Tests.ConsoleApp.

client.PacketReceived += (sender, e) =>
{
   var interleavedPacket = e.Packet as RtspInterleavedPacket;

   if (interleavedPacket != null && interleavedPacket.Channel > 0)
   {
      // In most cases, skip this packet
      Console.ForegroundColor = ConsoleColor.DarkCyan;
      Console.WriteLine("Skipping some data : size {0}", e.Packet.Data.Length);
      return;
   }

   Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss.fff} New image received, bytes:{e.Packet.Data.Length}");

   File.WriteAllBytes(Path.Combine(_applicationSettings.SavePath, string.Format(_applicationSettings.FrameFileNameFormat, DateTime.UtcNow)), e.Packet.Data);
};

When I ran my test harness the number of images didn’t match the frame rate configured in the camera.

The format of the images was corrupted, and I couldn’t open them.

It looked like I was writing RTSP packets to the disk rather than Joint Photographic Experts Group(JPEG) images from the MPEG stream.

There was another sample application, RabbitOM.Streaming.Tests.Mjpeg, which displayed JPEG images. After looking at the code I figured out I needed to use the RtpFrameBuilder class to assemble the RTSP packets into frames.

private static readonly RtpFrameBuilder _frameBuilder = new JpegFrameBuilder();
...
_frameBuilder.FrameReceived += OnFrameReceived;
...
client.PacketReceived += (sender, e) =>
{
   var interleavedPacket = e.Packet as RtspInterleavedPacket;

   if (interleavedPacket != null && interleavedPacket.Channel > 0)
   {
      // In most cases, skip this packet
      Console.ForegroundColor = ConsoleColor.DarkCyan;
      Console.WriteLine("Skipping some data : size {0}", e.Packet.Data.Length);
      return;
   }

   _frameBuilder.Write(e.Packet.Data);
};

private static void OnFrameReceived(object sender, RtpFrameReceivedEventArgs e)
{
   Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss.fff} New image received, bytes:{e.Frame.Data.Length}");

   File.WriteAllBytes(Path.Combine(_applicationSettings.SavePath, string.Format(_applicationSettings.FrameFileNameFormat, DateTime.UtcNow)), e.Frame.Data);
}

With the modified code the image size looked roughly the same as the SecurityCameraHttpClient images.

The format of the images was good, and I could open them.

Looks like KSAH-42.RabbitOM might be a good choice as it doesn’t have any external dependencies and the latency is minimal.

Building Cloud AI with AI-YoloSharp Object detection Azure HTTP Trigger function

Introduction

Using Copilot to generate the code to retrieve an image from a security camera, and small applications for object detection and object classification with YoloSharp, also went surprisingly well. An Azure HTTP Trigger function is a bit more complex, so I decided to do one next. I started with the Visual Studio 2022 generated code.

using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.Functions.Worker;
using Microsoft.Extensions.Logging;

namespace YoloObjectDetectionFunction
{
    public class Function1
    {
        private readonly ILogger<Function1> _logger;

        public Function1(ILogger<Function1> logger)
        {
            _logger = logger;
        }

        [Function("Function1")]
        public IActionResult Run([HttpTrigger(AuthorizationLevel.Function, "get", "post")] HttpRequest req)
        {
            _logger.LogInformation("C# HTTP trigger function processed a request.");
            return new OkObjectResult("Welcome to Azure Functions!");
        }
    }
}

I then generated the first version of the code with Copilot desktop.

The code looked “okay” but had lots of extra “usings”, used the FunctionName vs. Function attribute, etc., so it didn’t compile.

I think the “var yolo = new Yolo(modelPath)” comes from YoloDotNet by Niklas Swärd and “var detections = yolo.Predict(image)” is from YoloV8.Net by Samuel Stainback.

using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Http;
using Microsoft.Extensions.Logging;
using SixLabors.ImageSharp;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;
using YoloSharp;
using YoloSharp.DataStructures;

public static class YoloObjectDetectionFunction
{
    private static readonly string modelPath = "path/to/your/yolo-model.onnx";

    [FunctionName("YoloObjectDetection")]
    public static async Task<IActionResult> Run(
        [HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequestMessage req,
        ILogger log)
    {
        log.LogInformation("Yolo Object Detection function processed a request.");

        // Read the image from the request
        byte[] imageBytes = await req.Content.ReadAsByteArrayAsync();
        using (Image<Rgba32> image = Image.Load<Rgba32>(imageBytes))
        {
            // Initialize YoloSharp
            using (var yolo = new Yolo(modelPath))
            {
                // Detect objects in the image
                var detections = yolo.Predict(image);

                // Process the detections
                foreach (var detection in detections)
                {
                    // Draw bounding boxes on the image
                    image.Mutate(ctx => ctx.DrawPolygon(Rgba32.Red, 2, detection.BoundingBox.ToPolygon()));
                }

                // Save the modified image to a memory stream
                using (var ms = new MemoryStream())
                {
                    image.SaveAsPng(ms);
                    ms.Seek(0, SeekOrigin.Begin);

                    // Return the modified image as a response
                    var result = new HttpResponseMessage(HttpStatusCode.OK)
                    {
                        Content = new StreamContent(ms)
                    };
                    result.Content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("image/png");
                    return new FileStreamResult(ms, "image/png");
                }
            }
        }
    }
}

I manually fixed all the issues in the code with a lot of assistance from Visual Studio 2022 IntelliSense.

using System.Net;

using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.Functions.Worker;
using Microsoft.Extensions.Logging;

using SixLabors.ImageSharp;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;
using SixLabors.ImageSharp.Drawing.Processing;

//using YoloSharp;
//using YoloSharp.DataStructures;
using Compunet.YoloSharp;


public static class YoloObjectDetectionFunction
{
   //private static readonly string modelPath = "path/to/your/yolo-model.onnx";
   private static readonly string modelPath = "yolov8s.onnx";

   //[FunctionName("YoloObjectDetection")]
   [Function("YoloObjectDetection")]
   public static async Task<IActionResult> Run(
       [HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequestMessage req,
       ILogger log)
   {
      log.LogInformation("Yolo Object Detection function processed a request.");

      // Read the image from the request
      byte[] imageBytes = await req.Content.ReadAsByteArrayAsync();
      using (Image<Rgba32> image = Image.Load<Rgba32>(imageBytes))
      {
         // Initialize YoloSharp
         //using (var yolo = new Yolo(modelPath))
         using (var yolo = new YoloPredictor(modelPath))
         {
            // Detect objects in the image
            //var detections = yolo.Predict(image);
            var detections = yolo.Detect(image);

            // Process the detections
            foreach (var detection in detections)
            {
               // Draw bounding boxes on the image
               //image.Mutate(ctx => ctx.DrawPolygon(Rgba32.Red, 2, detection.BoundingBox.ToPolygon()));

               var rectangle = new PointF[] {new Point(detection.Bounds.Bottom, detection.Bounds.Left), new Point(detection.Bounds.Bottom, detection.Bounds.Right), new Point(detection.Bounds.Right, detection.Bounds.Top), new Point(detection.Bounds.Left, detection.Bounds.Top)};

               image.Mutate(ctx => ctx.DrawPolygon(Rgba32.ParseHex("FF0000"), 2, rectangle));
            }

            // Save the modified image to a memory stream
            using (var ms = new MemoryStream())
            {
               image.SaveAsPng(ms);
               ms.Seek(0, SeekOrigin.Begin);

               // Return the modified image as a response
               var result = new HttpResponseMessage(HttpStatusCode.OK)
               {
                  Content = new StreamContent(ms)
               };
               result.Content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("image/png");
               return new FileStreamResult(ms, "image/png");
            }
         }
      }
   }
}

When I posted an image to the function with Telerik Fiddler Classic it threw an exception, because the ILogger injected into the Run method was null.

I then asked Github Copilot to “Modify code so the ILogger is parameter of constructor”.

using System.Net;

using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.Functions.Worker;
using Microsoft.Extensions.Logging;

using SixLabors.ImageSharp;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;
using SixLabors.ImageSharp.Drawing.Processing;

//using YoloSharp;
//using YoloSharp.DataStructures;
using Compunet.YoloSharp;


//public static class YoloObjectDetectionFunction
public class YoloObjectDetectionFunction
{
   private static ILogger _log;
   //private static readonly string modelPath = "path/to/your/yolo-model.onnx";
   private static readonly string modelPath = "yolov8s.onnx";

   public YoloObjectDetectionFunction(ILogger<YoloObjectDetectionFunction> log)
   {
      _log = log;
   }

   //[FunctionName("YoloObjectDetection")]
   [Function("YoloObjectDetection")]
   //public static async Task<IActionResult> Run( [HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequestMessage req, ILogger log)
   public static async Task<IActionResult> Run([HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequestMessage req)
   {
      _log.LogInformation("Yolo Object Detection function processed a request.");

      // Read the image from the request
      byte[] imageBytes = await req.Content.ReadAsByteArrayAsync();
      using (Image<Rgba32> image = Image.Load<Rgba32>(imageBytes))
      {
         // Initialize YoloSharp
         //using (var yolo = new Yolo(modelPath))
         using (var yolo = new YoloPredictor(modelPath))
         {
            // Detect objects in the image
            //var detections = yolo.Predict(image);
            var detections = yolo.Detect(image);

            // Process the detections
            foreach (var detection in detections)
            {
               // Draw bounding boxes on the image
               //image.Mutate(ctx => ctx.DrawPolygon(Rgba32.Red, 2, detection.BoundingBox.ToPolygon()));

               var rectangle = new PointF[] {new Point(detection.Bounds.Bottom, detection.Bounds.Left), new Point(detection.Bounds.Bottom, detection.Bounds.Right), new Point(detection.Bounds.Right, detection.Bounds.Top), new Point(detection.Bounds.Left, detection.Bounds.Top)};

               image.Mutate(ctx => ctx.DrawPolygon(Rgba32.ParseHex("FF0000"), 2, rectangle));
            }

            // Save the modified image to a memory stream
            using (var ms = new MemoryStream())
            {
               image.SaveAsPng(ms);
               ms.Seek(0, SeekOrigin.Begin);

               // Return the modified image as a response
               var result = new HttpResponseMessage(HttpStatusCode.OK)
               {
                  Content = new StreamContent(ms)
               };
               result.Content.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue("image/png");
               return new FileStreamResult(ms, "image/png");
            }
         }
      }
   }
}

When I posted an image to the function it threw an exception, because the content of the HttpRequestMessage was null.

I then asked Github Copilot to “Modify the code so that the image is read from the form”.

// Read the image from the form
var form = await req.ReadFormAsync();
var file = form.Files["image"];
if (file == null || file.Length == 0)
{
   return new BadRequestObjectResult("Image file is missing or empty.");
}

When I posted an image to the function it returned a 400 Bad Request Error.

After inspecting the request I realized that the name field was wrong, as the generated code was looking for “image”.

Content-Disposition: form-data; name="image"; filename="sports.jpg"

Then, when I posted an image to the function it returned a 500 error.

But the FileStreamResult was failing, most likely because the MemoryStream was disposed by the using block before the response had been written, so I modified the code to return a FileContentResult, which copies the bytes out first.

using (var ms = new MemoryStream())
{
   image.SaveAsJpeg(ms);

   return new FileContentResult(ms.ToArray(), "image/jpg");
}

Then, when I posted an image to the function it succeeded.

But, the bounding boxes around the detected objects were wrong.

I then manually fixed up the polygon code so the lines for each bounding box were drawn in the correct order.

// Process the detections
foreach (var detection in detections)
{
   var rectangle = new PointF[] {
      new Point(detection.Bounds.Left, detection.Bounds.Bottom),
      new Point(detection.Bounds.Right, detection.Bounds.Bottom),
      new Point(detection.Bounds.Right, detection.Bounds.Top),
      new Point(detection.Bounds.Left, detection.Bounds.Top)
 };

Then, when I posted an image to the function it succeeded.

The bounding boxes around the detected objects were correct.

I then “refactored” the code, removing all the unused “using”s, removing any commented-out code, changing the ILogger to be initialised using a primary constructor, etc.

using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.Functions.Worker;
using Microsoft.Extensions.Logging;

using SixLabors.ImageSharp;
using SixLabors.ImageSharp.PixelFormats;
using SixLabors.ImageSharp.Processing;
using SixLabors.ImageSharp.Drawing.Processing;

using Compunet.YoloSharp;

public class YoloObjectDetectionFunction(ILogger<YoloObjectDetectionFunction> log)
{
   private readonly ILogger<YoloObjectDetectionFunction> _log = log;
   private readonly string modelPath = "yolov8s.onnx";

   [Function("YoloObjectDetection")]
   public async Task<IActionResult> Run([HttpTrigger(AuthorizationLevel.Function, "post", Route = null)] HttpRequest req)
   {
      _log.LogInformation("Yolo Object Detection function processed a request.");

      // Read the image from the form
      var form = await req.ReadFormAsync();
      var file = form.Files["image"];
      if (file == null || file.Length == 0)
      {
         return new BadRequestObjectResult("Image file is missing or empty.");
      }

      using (var stream = file.OpenReadStream())
      using (Image<Rgba32> image = Image.Load<Rgba32>(stream))
      {
         // Initialize YoloSharp
         using (var yolo = new YoloPredictor(modelPath))
         {
            // Detect objects in the image
            var detections = yolo.Detect(image);

            // Process the detections
            foreach (var detection in detections)
            {
               var rectangle = new PointF[] {
                  new Point(detection.Bounds.Left, detection.Bounds.Bottom),
                  new Point(detection.Bounds.Right, detection.Bounds.Bottom),
                  new Point(detection.Bounds.Right, detection.Bounds.Top),
                  new Point(detection.Bounds.Left, detection.Bounds.Top)
               };

               image.Mutate(ctx => ctx.DrawPolygon(Rgba32.ParseHex("FF0000"), 2, rectangle));
            }

            // Save the modified image to a memory stream
            using (var ms = new MemoryStream())
            {
               image.SaveAsJpeg(ms);

              return new FileContentResult(ms.ToArray(), "image/jpg");
            }
         }
      }
   }
}
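
One follow-up I would make before reusing this (my suggestion, not Copilot output): construct the YoloPredictor once per host rather than per request, so the ONNX model isn’t reloaded for every call. A sketch for the isolated worker’s Program.cs, with the predictor then injected through the function’s constructor alongside the ILogger:

using Compunet.YoloSharp;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;

var host = new HostBuilder()
   .ConfigureFunctionsWorkerDefaults()
   .ConfigureServices(services =>
   {
      // Load the ONNX model once for the lifetime of the host
      services.AddSingleton(new YoloPredictor("yolov8s.onnx"));
   })
   .Build();

host.Run();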

Summary

The initial code generated by Copilot was badly broken but, with the assistance of Visual Studio 2022 IntelliSense, was fixed fairly quickly. The ILogger not being initialised and me using the “wrong” upload file name were easy to debug, but the FileStreamResult exception was a bit more difficult.

It took me quite a bit longer to write the function with Copilot desktop/Github Copilot than it would have taken me normally. But I think a lot of this was due to having to take screenshots and write this blog post as I went, and to having already written several Azure HTTP Trigger functions for processing uploaded images.

The Copilot generated code in this post is not suitable for production

Building Edge AI with Github Copilot - Security Camera HTTP

I use GitHub Copilot with Visual Studio 2022 and I wanted to see if I could write a simple application only using “Ask Copilot”. Retrieving a static image from a security camera (like my HiLook IPC-T250H) felt like a reasonable challenge.

please write a console application that uses an HttpClient to retrieve an image from a security camera

static async Task Main(string[] args)
{
   string cameraUrl = "http://your-security-camera-url/image.jpg";
   string savePath = "downloaded_image.jpg";

   using (HttpClient client = new HttpClient())
   {
      try
      {
         HttpResponseMessage response = await client.GetAsync(cameraUrl);
         response.EnsureSuccessStatusCode();

         byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
         await File.WriteAllBytesAsync(savePath, imageBytes);

         Console.WriteLine("Image downloaded successfully.");
      }
      catch (Exception ex)
      {
         Console.WriteLine($"An error occurred: {ex.Message}");
      }
   }
}

The Github Copilot generated code compiled first time and looked reasonable for a Proof of Concept (PoC). After changing the “cameraUrl” the program could retrieve an image from one of my cameras when I disabled authentication.

please write a console application that uses an httpclient to retrieve an image from a security camera. The security camera needs a username and password

static async Task Main(string[] args)
{
   string cameraUrl = "http://your-security-camera-url/image";
   string username = "your-username";
   string password = "your-password";

   using (HttpClient client = new HttpClient())
   {
      var byteArray = new System.Text.ASCIIEncoding().GetBytes($"{username}:{password}");
      client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", Convert.ToBase64String(byteArray));

      HttpResponseMessage response = await client.GetAsync(cameraUrl);
      response.EnsureSuccessStatusCode();

      byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
      File.WriteAllBytes("camera_image.jpg", imageBytes);

      Console.WriteLine("Image retrieved and saved as camera_image.jpg");
   }
}

Github Copilot had taken my request literally and implemented Basic authentication from Hypertext Transfer Protocol (HTTP/1.1): Authentication (RFC7235). My HiLook IPC-T250H only supports Digest Authentication (RFC2617) so I tried

please modify the security code to support Digest authentication

static async Task Main(string[] args)
{
   string cameraUrl = "http://your-security-camera-url/image";
   string username = "your-username";
   string password = "your-password";

   using (HttpClient client = new HttpClient())
   {
      HttpResponseMessage response = await client.GetAsync(cameraUrl);

      if (response.StatusCode == System.Net.HttpStatusCode.Unauthorized)
      {
         var wwwAuthenticateHeader = response.Headers.WwwAuthenticate.FirstOrDefault();
         if (wwwAuthenticateHeader != null && wwwAuthenticateHeader.Scheme == "Digest")
         {
            var digestHeader = wwwAuthenticateHeader.Parameter;
            var digestResponse = CreateDigestHeader(digestHeader, cameraUrl, username, password, "GET");

            client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Digest", digestResponse);

            response = await client.GetAsync(cameraUrl);
            response.EnsureSuccessStatusCode();

            byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
            File.WriteAllBytes("camera_image.jpg", imageBytes);

            Console.WriteLine("Image retrieved and saved as camera_image.jpg");
         }
      }
   }
}

private static string CreateDigestHeader(string digestHeader, string url, string username, string password, string method)
{
   var headerValues = digestHeader.Split(',').Select(v => v.Trim().Split('=')).ToDictionary(v => v[0], v => v[1].Trim('"'));

   string ha1 = CalculateMD5Hash($"{username}:{headerValues["realm"]}:{password}");
   string ha2 = CalculateMD5Hash($"{method}:{url}");
   string response = CalculateMD5Hash($"{ha1}:{headerValues["nonce"]}:{headerValues["nc"]}:{headerValues["cnonce"]}:{headerValues["qop"]}:{ha2}");

   return $"username=\"{username}\", realm=\"{headerValues["realm"]}\", nonce=\"{headerValues["nonce"]}\", uri=\"{url}\", qop={headerValues["qop"]}, nc={headerValues["nc"]}, cnonce=\"{headerValues["cnonce"]}\", response=\"{response}\", opaque=\"{headerValues["opaque"]}\"";
}

private static string CalculateMD5Hash(string input)
{
   using (MD5 md5 = MD5.Create())
   {
      byte[] inputBytes = Encoding.ASCII.GetBytes(input);
      byte[] hashBytes = md5.ComputeHash(inputBytes);
      return BitConverter.ToString(hashBytes).Replace("-", "").ToLower();
   }
}

Github Copilot had taken my request literally and implemented Digest Authentication (RFC2617), so I did some “research”. The Copilot desktop application suggested using NetworkCredential, which supports Basic and Digest (also NTLM and Kerberos) authentication.

modify the code to use a NetworkCredential to authenticate with the security camera

using System.Net;

namespace SecurityCameraClient
{
   internal class Program
   {
      static async Task Main(string[] args)
      {
         string cameraUrl = "http://your-security-camera-url/image.jpg";
         string savePath = "downloaded_image.jpg";
         string username = "your-username";
         string password = "your-password";

         using (HttpClientHandler handler = new HttpClientHandler { Credentials = new NetworkCredential(username, password) })
         using (HttpClient client = new HttpClient(handler))
         {
            try
            {
               HttpResponseMessage response = await client.GetAsync(cameraUrl);
               response.EnsureSuccessStatusCode();

               byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
               await File.WriteAllBytesAsync(savePath, imageBytes);

               Console.WriteLine("Image downloaded successfully.");
            }
            catch (Exception ex)
            {
               Console.WriteLine($"An error occurred: {ex.Message}");
            }
         }
      }
   }
}

The Github Copilot generated code compiled and after changing the “cameraUrl”, “username” and “password” the program could retrieve an image from each of my cameras.

I wanted the application to retrieve images regularly, but the first Github Copilot generated code was pretty ugly (it used a System.Timers.Timer). After “consulting” the Copilot desktop application

modify the code to use a System.Threading Timer so an image is retrieved every 1000mSec

using System.Net;

namespace SecurityCameraClient
{
   internal class Program
   {
      private static Timer _timer;
      private static HttpClient _client;
      private static string _cameraUrl = "http://your-security-camera-url/image.jpg";
      private static string _savePath = "downloaded_image.jpg";

      static void Main(string[] args)
      {
         string username = "your-username";
         string password = "your-password";

         HttpClientHandler handler = new HttpClientHandler { Credentials = new NetworkCredential(username, password) };
         _client = new HttpClient(handler, disposeHandler: true);

         _timer = new Timer(async _ => await RetrieveImageAsync(), null, 0, 1000);

         Console.WriteLine("Press any key to exit...");
         Console.ReadKey();

         _timer.Dispose();
         _client.Dispose();
      }

      private static async Task RetrieveImageAsync()
      {
         try
         {
            HttpResponseMessage response = await _client.GetAsync(_cameraUrl);
            response.EnsureSuccessStatusCode();

            byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
            await File.WriteAllBytesAsync(_savePath, imageBytes);

            Console.WriteLine("Image downloaded successfully.");
         }
         catch (Exception ex)
         {
            Console.WriteLine($"An error occurred: {ex.Message}");
         }
      }
   }
}

By this stage I was an elite “prompt engineer”, and after a couple of revisions of my Github Copilot prompt the code released unmanaged resources with “using” statements (the initial version used .Dispose() and was a bit ugly).

modify the code to dispose of the HttpClientHandler, HttpClient and Timer with “using” statements

The application had a reentrancy issue when retrieving an image from a camera took too long.

modify the code to stop RetrieveImageAsync getting called while an image is already being retrieved

I then decided to try a “cosmetic” change.

modify the code _timer does not have to be class level variable

The savePath was a constant and I wanted to store a series of images.

modify the code to use String.Format to generate the savepath

For the final version I modified the program, adding a Console.WriteLine to display the build type, and retrieving the SavePath, dueTime, and period from appsettings.json. The Microsoft.Extensions.Configuration.UserSecrets configuration source was used for the CameraUrl, Username, and Password.

using System.Net;

using Microsoft.Extensions.Configuration;

namespace SecurityCameraClient
{
   internal class Program
   {
      private static HttpClient _client;
      private static bool _isRetrievingImage = false;
      private static ApplicationSettings _applicationSettings;

      static void Main(string[] args)
      {
         Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss} SecurityCameraClient starting");
#if RELEASE
         Console.WriteLine("RELEASE");
#else
         Console.WriteLine("DEBUG");
#endif

         var configuration = new ConfigurationBuilder()
            .AddJsonFile("appsettings.json", false, true)
            .AddUserSecrets<Program>()
            .Build();

         _applicationSettings = configuration.GetSection("ApplicationSettings").Get<ApplicationSettings>();

         using (HttpClientHandler handler = new HttpClientHandler { Credentials = new NetworkCredential(_applicationSettings.Username, _applicationSettings.Password) })
         using (_client = new HttpClient(handler))
         using (var timer = new Timer(async _ => await RetrieveImageAsync(), null, _applicationSettings.TimerDue, _applicationSettings.TimerPeriod))
         {
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
         }
      }

      private static async Task RetrieveImageAsync()
      {
         if (_isRetrievingImage) return;

         _isRetrievingImage = true;
         try
         {
            HttpResponseMessage response = await _client.GetAsync(_applicationSettings.CameraUrl);
            response.EnsureSuccessStatusCode();

            byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
            string savePath = string.Format(_applicationSettings.SavePath, DateTime.UtcNow);
            await File.WriteAllBytesAsync(savePath, imageBytes);

            Console.WriteLine("Image downloaded successfully.");
         }
         catch (Exception ex)
         {
            Console.WriteLine($"An error occurred: {ex.Message}");
         }
         finally
         {
            _isRetrievingImage = false;
         }
      }
   }

   public class ApplicationSettings
   {
      public string CameraUrl { get; set; } = "";

      public string SavePath { get; set; } = "";

      public string Username { get; set; } = "";

      public string Password { get; set; } = "";

      public TimeSpan TimerDue { get; set; } = TimeSpan.Zero;

      public TimeSpan TimerPeriod { get; set; } = TimeSpan.Zero;
   }
}
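
One thing I would still tidy up (my observation, not Copilot output): the _isRetrievingImage bool isn’t thread safe if timer callbacks ever overlap, so the check and set should be atomic. A sketch using Interlocked:

private static int _retrieving = 0; // 0 = idle, 1 = busy

private static async Task RetrieveImageAsync()
{
   // Atomically claim the busy flag; bail out if another callback holds it
   if (Interlocked.CompareExchange(ref _retrieving, 1, 0) != 0) return;

   try
   {
      HttpResponseMessage response = await _client.GetAsync(_applicationSettings.CameraUrl);
      response.EnsureSuccessStatusCode();

      // ... save the image as before ...
   }
   finally
   {
      Interlocked.Exchange(ref _retrieving, 0);
   }
}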

Overall, my Github Copilot experience was pretty good, and got better as my “prompt engineering” improved.

The Github Copilot “decision” to implement Hypertext Transfer Protocol (HTTP/1.1): Authentication (RFC7235) and Digest Authentication (RFC2617) was “sub optimal”.

IoT and AI Building Edge AI with AI - Introduction

Introduction

Recently, there have been mainstream media articles (Forbes, Wired-Video games, Wired-Copilot, BBC, The Guardian, etc.) about how Artificial Intelligence (AI) could soon replace developers.

I have been using Github Copilot, mainly for “enhanced” Visual Studio IntelliSense, code snippets, and refactoring. I have also found Github Copilot generated code samples and more context-aware searching of documentation really useful. Overall, the experience has been positive with limited hallucinations, and my productivity has improved as my “prompt engineering” skills improved.

Initially, I started building “hobbyist” AI applications for the edge with an Azure Percept, and for customer projects used Ultralytics Yolo, Azure Machine Learning, Azure Cognitive Services, ML.Net, Open Neural Network Exchange (ONNX), Nvidia Jetson modules, Seeedstudio reComputer Industrial J3011 – Fanless Edge AI Devices, etc.

After some discussion with my co-workers I decided to explore how much Github Copilot helped building a “non-trivial” computer vision Proof of Concept (PoC). In previous blog posts I have written about capturing still images from a Security Camera so figured that was a good place to start.

Even though I have been using Copilot for a while I still say please.

using System;
using System.IO;
using System.Net.Http;
using System.Threading.Tasks;

class Program
{
    static async Task Main(string[] args)
    {
        string cameraUrl = "http://your-security-camera-url/image.jpg"; // Replace with your security camera URL
        string savePath = "C:\\path\\to\\save\\image.jpg"; // Replace with the path where you want to save the image

        using (HttpClient client = new HttpClient())
        {
            try
            {
                HttpResponseMessage response = await client.GetAsync(cameraUrl);
                response.EnsureSuccessStatusCode();

                byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
                await File.WriteAllBytesAsync(savePath, imageBytes);

                Console.WriteLine("Image retrieved and saved successfully.");
            }
            catch (Exception ex)
            {
                Console.WriteLine($"An error occurred: {ex.Message}");
            }
        }
    }
}

The generated code compiled first time, looked “ok” and worked with some of my security cameras when the cameraUrl and save path were modified.

I then modified the code so “sensitive” information was not stored in code or configuration files.

//using System;
//using System.IO;
//using System.Net.Http;
//using System.Reflection;
//using System.Threading.Tasks;

using Microsoft.Extensions.Configuration;

class Program
{
   static async Task Main(string[] args)
   {
      //string cameraUrl = "http://your-security-camera-url/image.jpg"; // Replace with your security camera URL
      //string savePath = "C:\\path\\to\\save\\image.jpg"; // Replace with the path where you want to save the image
      Model.ApplicationSettings applicationSettings;

   Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss} 0.2.CameraHTTPClient starting");
#if RELEASE
         Console.WriteLine("RELEASE");
#else
      Console.WriteLine("DEBUG");
#endif
      // load the app settings into configuration
      var configuration = new ConfigurationBuilder()
         .AddJsonFile("appsettings.json", false, true)
         .AddUserSecrets<Program>()
         .Build();

      applicationSettings = configuration.GetSection("ApplicationSettings").Get<Model.ApplicationSettings>();

      using (HttpClient client = new HttpClient())
      {
         try
         {
            HttpResponseMessage response = await client.GetAsync(applicationSettings.CameraUrl);
            response.EnsureSuccessStatusCode();

            byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
            await File.WriteAllBytesAsync(applicationSettings.SavePath, imageBytes);

            Console.WriteLine("Image retrieved and saved successfully.");
         }
         catch (Exception ex)
         {
            Console.WriteLine($"An error occurred: {ex.Message}");
         }
      }
   }
}

namespace Model
{
   public class ApplicationSettings
   {
      public string CameraUrl { get; set; } = "";

      public string SavePath { get; set; } = "Images";
   }
}

Several of my security cameras (e.g. HiLook IPC-T250H) required the client to authenticate, so I added NetworkCredential to my Copilot prompt.

// please write c# console application that uses httpclient and an httpget to retrieve image from security camera with NetworkCredential security
//
// Sure! Here's a C# console application that uses HttpClient and HttpGet to retrieve an image from a security camera with NetworkCredential security: 
// In this example:
//
// Replace http://your-security-camera-url/image.jpg with the actual URL of your security camera's image.
//
// Replace C:\\path\\to\\save\\image.jpg with the path where you want to save the retrieved image.
//
// Replace your-username and your-password with your actual credentials.
//
// This application uses HttpClient with NetworkCredential to authenticate the request, retrieves the image as a byte array, and saves it to the specified path. If there's an error during the process, it will be caught and displayed in the console.
//
// Happy coding! 🚀
//using System;
//using System.IO;
using System.Net;
//using System.Net.Http;
//using System.Threading.Tasks;

using Microsoft.Extensions.Configuration;

class Program
{
   static async Task Main(string[] args)
   {
      //string cameraUrl = "http://your-security-camera-url/image.jpg"; // Replace with your security camera URL
      //string savePath = "C:\\path\\to\\save\\image.jpg"; // Replace with the path where you want to save the image
      //string username = "your-username"; // Replace with your username
      //string password = "your-password"; // Replace with your password
      Model.ApplicationSettings applicationSettings;

      Console.WriteLine($"{DateTime.UtcNow:yy-MM-dd HH:mm:ss} 0.3.CameraHTTPClient starting");
#if RELEASE
         Console.WriteLine("RELEASE");
#else
      Console.WriteLine("DEBUG");
#endif
      // load the app settings into configuration
      var configuration = new ConfigurationBuilder()
         .AddJsonFile("appsettings.json", false, true)
         .AddUserSecrets<Program>()
         .Build();

      applicationSettings = configuration.GetSection("ApplicationSettings").Get<Model.ApplicationSettings>();

      using (HttpClientHandler handler = new HttpClientHandler { Credentials = new NetworkCredential(applicationSettings.Username, applicationSettings.Password) })
      using (HttpClient client = new HttpClient(handler))
      {
         try
         {
            HttpResponseMessage response = await client.GetAsync(applicationSettings.CameraUrl);
            response.EnsureSuccessStatusCode();

            byte[] imageBytes = await response.Content.ReadAsByteArrayAsync();
            await File.WriteAllBytesAsync(applicationSettings.SavePath, imageBytes);

            Console.WriteLine("Image retrieved and saved successfully.");
         }
         catch (Exception ex)
         {
            Console.WriteLine($"An error occurred: {ex.Message}");
         }
      }
   }
}

namespace Model
{
   public class ApplicationSettings
   {
      public string CameraUrl { get; set; } = "";

      public string SavePath { get; set; } = "Images";

      public string Username { get; set; } = "";

      public string Password { get; set; } = "";
   }
}

My Visual Studio 2022 solution with a project for each Copilot generated sample.

Summary

The Copilot generated code for my three “trivial” PoC applications compiled and worked with minimal modifications.

The Copilot generated code in this post is not suitable for production