Saturday, August 7, 2021

 

<#

 

.SYNOPSIS

 

This module can be called from a runbook and uses Azure REST methods.

Unlike a user identity, an application or service principal cannot simply connect to an Az account; it must first acquire a token.

This module shows how to get a token so that resources can be created, updated, and deleted using REST methods.

#https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-client-creds-grant-flow

#similar to Connect-AzAccount -identity

#>

 

 

 

function Get-Payload() {

param (

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,

    [string]$Resource = "https://management.core.windows.net/"

)

# System.Web is not loaded by default in Windows PowerShell, so load it for UrlEncode
Add-Type -AssemblyName System.Web
$encoded = [System.Web.HttpUtility]::UrlEncode($ClientSecret)

$payload = "grant_type=client_credentials&client_id=$ClientId&client_secret=$encoded&resource=$Resource"

return $payload

}

 

function Get-Token(){

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,

    [string]$Resource = "https://management.core.windows.net/",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)

$payload = Get-Payload $ClientId $ClientSecret

$Token = Invoke-RestMethod -Method Post -Uri $RequestAccessTokenUri -body $payload -ContentType 'application/x-www-form-urlencoded'

return $Token

}

 

function Get-ResourceGroups(){

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$SubscriptionId,

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,


    [string]$Resource = "https://management.core.windows.net/",

    [string]$environment = "AzureCloud",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)

$Token = Get-Token $TenantId $ClientId $ClientSecret $Resource $RequestAccessTokenUri

$ApiUri = "https://management.azure.com/subscriptions/$($SubscriptionId)/resourcegroups?api-version=2017-05-10"

$Headers = @{}

$Headers.Add("Authorization","$($Token.token_type) $($Token.access_token)")

$ResourceGroups = Invoke-RestMethod -Method Get -Uri $ApiUri -Headers $Headers

return $ResourceGroups

}

 

function Get-Cache() {

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$SubscriptionId,

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,

    [Parameter(Mandatory=$true)][string]$ResourceGroupName,

    [Parameter(Mandatory=$true)][string]$CacheName,

    [string]$Resource = "https://management.core.windows.net/",

    [string]$environment = "AzureCloud",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)

$Token = Get-Token $TenantId $ClientId $ClientSecret $Resource $RequestAccessTokenUri

$ApiUri="https://management.azure.com/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroupName/providers/Microsoft.Cache/redis/$($CacheName)?api-version=2020-06-01"

$Headers = @{}

$Headers.Add("Authorization","$($Token.token_type) $($Token.access_token)")

$Cache = Invoke-RestMethod -Method Get -Uri $ApiUri -Headers $Headers

return $Cache

}

 

function New-Cache() {

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$SubscriptionId,

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,

    [Parameter(Mandatory=$true)][string]$ResourceGroupName,

    [Parameter(Mandatory=$true)][string]$CacheName,

    [string]$Resource = "https://management.core.windows.net/",

    [string]$environment = "AzureCloud",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)


$Token = Get-Token $TenantId $ClientId $ClientSecret $Resource $RequestAccessTokenUri

$ApiUri = "https://management.azure.com/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroupName/providers/Microsoft.Cache/redis/$($CacheName)?api-version=2020-06-01"

$payload = @"

{

  "location": "West US 2",

  "properties": {

    "sku": {

      "name":"Premium",

      "family":"P",

      "capacity":1

    }

  }

}

"@

$Headers = @{}

$Headers.Add("Authorization","$($Token.token_type) $($Token.access_token)")

$Cache = Invoke-RestMethod -contentType "application/json" -Method Put -Uri $ApiUri -Headers $Headers -Body $payload

return $Cache

}

 

function Remove-Cache() {

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$SubscriptionId,

    [Parameter(Mandatory=$true)][string]$ClientId,

    [Parameter(Mandatory=$true)][string]$ClientSecret,

    [Parameter(Mandatory=$true)][string]$ResourceGroupName,

    [Parameter(Mandatory=$true)][string]$CacheName,

    [string]$Resource = "https://management.core.windows.net/",

    [string]$environment = "AzureCloud",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)

$Token = Get-Token $TenantId $ClientId $ClientSecret $Resource $RequestAccessTokenUri

$ApiUri="https://management.azure.com/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroupName/providers/Microsoft.Cache/redis/$($CacheName)?api-version=2020-06-01"

$Headers = @{}

$Headers.Add("Authorization","$($Token.token_type) $($Token.access_token)")

$Cache = Invoke-RestMethod -Method Delete -Uri $ApiUri -Headers $Headers

return $Cache

}

 

Export-ModuleMember -Function Get-Token

Export-ModuleMember -Function Get-ResourceGroups

Export-ModuleMember -Function New-Cache

Export-ModuleMember -Function Get-Cache

Export-ModuleMember -Function Remove-Cache
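A minimal usage sketch, assuming this module is saved as AzRest.psm1 (the module file name, credentials, and resource names below are placeholders):

Import-Module ./AzRest.psm1

# Acquire a token once to verify the service principal credentials work
$token = Get-Token $tenantId $clientId $clientSecret

# List resource groups and fetch a cache using the same service principal
$groups = Get-ResourceGroups $tenantId $subscriptionId $clientId $clientSecret

$cache = Get-Cache $tenantId $subscriptionId $clientId $clientSecret "rg-demo" "AGS-redis-demo"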

 #codingexercise https://1drv.ms/w/s!Ashlm-Nw-wnWrwRgdOFj3KLA0XSi

Friday, August 6, 2021

 

Introduction:

This article is a continuation of the series of articles starting with the description of the SignalR service. In this article, we continue our study of Azure Stream Analytics from the last article. We were comparing Apache Flink and Kafka with Azure Stream Analytics and observing how Flink and Azure Stream Analytics determine watermarks and sequence events along their timelines. We now investigate automations for performing stream analysis.

Jobs and clusters form the two main components of Stream Analytics. When the job is created, the deployment can be validated. The job itself is represented by an ARM template, which is a JSON document that defines the infrastructure and configuration for the project. The template uses declarative syntax, so there is no need to write commands to create the deployment. The template takes parameters such as the location, the Stream Analytics job name, and the number of streaming units, which are then applied to the resource, and the job is created.

Deployment using the template can be kicked off directly from the CLI, PowerShell, portal and SDK. All of these provide programmability options.  Resources can be cleaned up by deleting the resource group.
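For instance, the template deployment can be started from PowerShell. A sketch (the template file name and its parameter names are assumptions that must match the template's own parameters section):

# Deploy the Stream Analytics job described by an ARM template;
# template parameters surface as dynamic parameters on the cmdlet
New-AzResourceGroupDeployment `
    -ResourceGroupName "rg-streaming" `
    -TemplateFile "./azuredeploy.json" `
    -streamAnalyticsJobName "asa-demo-job" `
    -numberOfStreamingUnits 3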

The input to the job can also be configured with the help of a PowerShell cmdlet. The New-AzStreamAnalyticsInput cmdlet takes the job name, the job input name, the resource group name, and the job input definition as parameters. Even blob storage can be passed in as an input; the AccessPolicyKey and SharedAccessKey are derived from the connection strings to the data source. The output of the job is similarly configured with the help of a JobOutputDefinition, which takes the storage account access key as a parameter; blobs will be stored in a container from that account. Finally, the transformation query can be specified via the New-AzStreamAnalyticsTransformation cmdlet, which takes the job name, the job transformation name, the resource group name, and the job transformation definition as parameters. This definition contains a query property that defines the transformation query.

The Start-AzStreamAnalyticsJob cmdlet takes the job name, resource group name, output start mode, and start time as parameters and starts the job.
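Putting the steps together, a minimal sketch, assuming the Az.StreamAnalytics module with file-based definitions (the JSON definition files and resource names are placeholders):

$rg = "rg-streaming"
$job = "asa-demo-job"

# Input, output, and transformation are each described by a JSON definition file
New-AzStreamAnalyticsInput -ResourceGroupName $rg -JobName $job -Name "demo-input" -File "./JobInputDefinition.json"
New-AzStreamAnalyticsOutput -ResourceGroupName $rg -JobName $job -Name "demo-output" -File "./JobOutputDefinition.json"
New-AzStreamAnalyticsTransformation -ResourceGroupName $rg -JobName $job -Name "demo-transformation" -File "./JobTransformationDefinition.json"

# Start processing from the job start time
Start-AzStreamAnalyticsJob -ResourceGroupName $rg -Name $job -OutputStartMode "JobStartTime"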

Thursday, August 5, 2021

 

Introduction:

This article is a continuation of the series of articles starting with the description of the SignalR service. In this article, we continue our study of Azure Stream Analytics from the last article. We were comparing Apache Flink and Kafka with Azure Stream Analytics and observing the utilization of Kubernetes to leverage containers for the clusters and the jobs run for analysis. One of the most interesting features of stream processing is the support for watermarks, and we explore this comparison next.

Flink provides three different types of time-based processing, corresponding to three kinds of timestamps: processing time, event time, and ingestion time.

 

Out of these, only event time guarantees completely consistent and deterministic results. All three processing types can be set on the StreamExecutionEnvironment prior to the execution of queries.

Event time also supports watermarks. Watermarks are the mechanism in Flink to measure progress in event time; they are simply inlined with the events. As a processor advances its timestamp, it introduces a watermark for the downstream operators to process. In a distributed system, where an operator might get inputs from more than one stream, the watermark on the outgoing stream is determined as the minimum of the watermarks from the incoming streams. As the input streams update their event times, so does the operator. Flink also provides a way to coalesce events within a window.
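As a toy illustration of that minimum rule in plain PowerShell (not Flink code; the epoch-second values are made up):

# An operator's output watermark is the minimum of its input watermarks:
# it can only promise that no earlier event will arrive once all inputs agree
function Get-OutputWatermark {
    param([Parameter(Mandatory=$true)][long[]]$InputWatermarks)
    return ($InputWatermarks | Measure-Object -Minimum).Minimum
}

Get-OutputWatermark -InputWatermarks 1628100000, 1628100150, 1628099990 # returns 1628099990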

 

The Flink connector has an EventTimeOrderingOperator. It uses watermarks and managed state to buffer elements, which helps to order elements by event time. The class extends AbstractStreamOperator and implements OneInputStreamOperator. The last seen watermark is initialized to the minimum value. The operator uses a timer service and a MapState stashed in the runtime context, and it processes stream records one by one. If an event does not have a timestamp, it is simply forwarded. If the event has a timestamp, the operator buffers all the events between the current and the next watermark.

When the event timer fires due to watermark progression, the operator polls all the event timestamps that are less than or equal to the current watermark. If there are no such timestamps, the queued state is cleared; otherwise, the next watermark is registered. The sorted list of timestamps from buffered events is maintained in a priority queue.

AscendingTimestampExtractor is a timestamp assigner and watermark generator for streams where timestamps are monotonically ascending. Timestamps continuously increase for data such as log files. The local watermarks are easy to assign because they follow the strictly increasing timestamps and are emitted periodically.

 

Microsoft Azure Stream Analytics also follows a timeline for events. There are two choices: arrival time and application/event time. It bases its watermark on the largest event time the service has seen, minus the out-of-order tolerance window size. If there are no incoming events, the watermark is the current estimated arrival time minus the late-arrival tolerance window. This can only be estimated because the real arrival time is known only to the data forwarders, such as Event Hubs. The design serves two additional purposes beyond generating watermarks: the system generates results in a timely fashion with or without incoming events, and the system behavior is repeatable. Since the data forwarder guarantees continuously increasing streaming data, the service disregards the out-of-order tolerance and late-arrival tolerance configurations when analytics applications choose arrival time as the event time.
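Written out as a back-of-the-envelope PowerShell sketch (an illustration of the formula as described, not service code; the tolerance defaults are arbitrary):

# Watermark = largest event time seen - out-of-order tolerance window;
# with no incoming events, fall back to estimated arrival time - late-arrival tolerance
function Get-AsaWatermark {
    param(
        [Nullable[datetime]]$MaxEventTimeSeen,
        [datetime]$EstimatedArrivalTime = (Get-Date),
        [timespan]$OutOfOrderTolerance = [timespan]::FromSeconds(5),
        [timespan]$LateArrivalTolerance = [timespan]::FromSeconds(60)
    )
    if ($null -ne $MaxEventTimeSeen) {
        return $MaxEventTimeSeen.Value - $OutOfOrderTolerance
    }
    return $EstimatedArrivalTime - $LateArrivalTolerance
}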

 

Wednesday, August 4, 2021

 Introduction: 

This article is a continuation of the series of articles starting with the description of SignalR service. 

In this article, we continue with our study of Azure Stream Analytics from the last article. We were comparing Apache Flink and Kafka with The Azure Stream Analytics and were observing the utilization of Kubernetes to leverage containers for the clusters and running jobs for analysis. 

Windows Azure also hosts Kubernetes for customers. Native containers are small and fast, and they have two defining characteristics. First, containers are isolated from each other and from the host, to the point that each has its own file system, which makes them portable across cloud and OS distributions. Second, immutable container images can be created at build/release time rather than at the deployment time of the application, since each application doesn't need to be composed with the rest of the application stack nor tied to the production infrastructure environment. Kubernetes extends this idea of app-plus-container all the way so that the hosts can be nodes of a cluster. Kubernetes evolved as an industry effort from the operating system's native Linux container support. It can be considered a step towards a truly container-centric development environment. Containers decouple applications from infrastructure, which separates dev from ops. Containers also demonstrate better resource isolation and improved resource utilization.

At this point, it is important to differentiate Kubernetes from PaaS. Kubernetes is not a traditional, all-inclusive PaaS. Unlike a PaaS that restricts applications, dictates the choice of application frameworks, restricts supported language runtimes, or distinguishes apps from services, Kubernetes aims to support an extremely diverse variety of workloads. If an application has been compiled to run in a container, it will work with Kubernetes. A PaaS provides databases, message buses, and cluster storage systems, but those can also run on Kubernetes. There is no click-to-deploy service marketplace either. Kubernetes does not build user code or deploy it, but it facilitates CI workflows that run on it.

Kubernetes lets users choose their own logging, monitoring, and alerting solutions. It also does not require a comprehensive application language or system, and it is independent of machine configuration or management. But a PaaS can run on Kubernetes and extend its reach to different clouds.

This section talks about Azure Stream Analytics clusters. A cluster offers a single-tenant deployment for complex streaming scenarios. Clusters can process more than 200 megabytes per second in real time. The jobs running on these clusters can leverage the features of Azure Stream Analytics and can directly read from inputs and outputs that are private to the organization. Clusters are sized in streaming units (SU), which represent a unit of CPU and memory resources allocated to a cluster. Cluster size can range from 36 SUs to 216 SUs.

Since the clusters are dedicated single-tenant clusters, they can run in a multi-tenant environment with complete isolation from other tenants. These clusters can be scaled, and a virtual network can be added so that jobs connect to resources securely over private endpoints. The clusters come with zero maintenance cost, so the application team can focus only on the streaming jobs and on sharing results with multiple teams.
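Provisioning such a cluster is itself scriptable. A sketch, assuming the cluster cmdlets in recent versions of the Az.StreamAnalytics module (the names and region are placeholders):

# Provision a dedicated Stream Analytics cluster sized in streaming units (36 to 216)
New-AzStreamAnalyticsCluster `
    -ResourceGroupName "rg-streaming" `
    -Name "asa-dedicated-cluster" `
    -Location "westus2" `
    -SkuName "Default" `
    -SkuCapacity 36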


Tuesday, August 3, 2021

Azure Stream Analytics

 Introduction:

This article is a continuation of the series of articles starting with the description of the SignalR service some time back. In this article, we focus on stream analytics from Azure. As the name suggests, this is a service used for analyzing events that are ordered and arrive in a continuous manner. Like its industry counterparts, this service also defines notions of jobs and of clusters to run them on. Analysis done with the help of data arriving in this form includes identifying patterns and relationships, and it applies to data sources that range from devices, sensors, and clickstreams to social media feeds and other applications. Actions can be taken on certain patterns, and workflows can be kicked off that provide alerts and notifications to users. Data can also be transformed and channeled via pipelines for automation. This service is available on the Azure IoT Edge runtime environment, enabling data processing on those devices.

Data from device traffic usually carry timestamps and are discrete, often independent of one another. They are also characterized as unstructured data arriving in an ordered manner, where it is generally not possible to store all of them at once for subsequent analysis. When the analysis is done in batches, it becomes a batch-processing job that runs on a cluster and scales out batches to different nodes, as many as the cluster will allow. Holding sets of events in batches might introduce latency, so the notion of micro-batching is introduced for faster processing. Stream processing takes it even further and processes one event at a time.

Some of the use cases for continuous events involve geospatial analytics for fleet management and driverless vehicles, weblog and clickstream analytics, and point-of-sale data from inventory control. In all these cases there is a point of ingestion from data sources, typically via Azure Event Hubs, IoT Hub, or Blob storage. Event ordering options and time windows can be suitably adjusted to perform aggregations. The query language is SQL, and it can be extended with JavaScript or C# user-defined functions. Queries written in SQL are easy to apply to filtering, sorting, and aggregation. The topology between ingestion and delivery is handled by the Stream Analytics service, while allowing extensions with the help of reference data stores, Azure Functions, and real-time scoring via machine learning services. Event Hubs, Azure Blob storage, and IoT Hubs can collect data on the ingestion side, while the results are distributed after analysis via alerts and notifications, dynamic dashboarding, data warehousing, and storage/archival. The fan-out of data to different services is itself a value addition, but the ability to transform events into processed events also generates more possibilities for downstream usages, including reporting and visualizations. As with all the services in the Azure portfolio, it comes with standard deployment using Azure Resource Manager templates, health monitoring via Azure Monitor, billing usages that can drive down costs, and various forms of programmability options such as SDKs, REST-based API services, command-line interfaces, and PowerShell automation. It is a fully managed PaaS offering, so the infrastructure and workflow initializers need not be set up by hand. It runs in the cloud and scales to many events with relatively low latency. The service is not only production-ready but also reliable in mission-critical deployments. Security and compliance are not sacrificed for the sake of performance. Finally, it integrates with Visual Studio to bring comprehensive testing, debugging, publishing, and authoring convenience.
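For example, a typical transformation query is standard SQL with a windowing function. An illustrative query (the input and output names are placeholders), shown as it might be embedded in a PowerShell deployment script:

# Count events per device over 30-second tumbling windows
$transformationQuery = @"
SELECT DeviceId, COUNT(*) AS EventCount
INTO [demo-output]
FROM [demo-input] TIMESTAMP BY EventTime
GROUP BY DeviceId, TumblingWindow(second, 30)
"@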


Monday, August 2, 2021

 <#

 

.SYNOPSIS

 

This script is used to create, update and delete resources using Azure REST methods.

This is helpful for making deployments idempotent, which is a requisite for Azure automation.

 

#>

 

 

param (

    [Parameter(Mandatory=$true)][string]$TenantId,

    [Parameter(Mandatory=$true)][string]$SubscriptionId,

    [string]$ClientId = "1b730954-1685-4b74-9bfd-dac224a7b894", # PowerShell Client Id

    [string]$ClientSecret = "",

    [string]$Resource = "https://management.core.windows.net/",

    [string]$environment = "AzureCloud",

    [string]$RequestAccessTokenUri = "https://login.microsoftonline.com/$TenantId/oauth2/token"

)

 

function getPayload() {

# System.Web is not loaded by default in Windows PowerShell, so load it for UrlEncode
Add-Type -AssemblyName System.Web
$encoded = [System.Web.HttpUtility]::UrlEncode($ClientSecret)

$payload = "grant_type=client_credentials&client_id=$ClientId&client_secret=$encoded&resource=$Resource"

return $payload

}

 

function getToken(){

$payload = getPayload

$Token = Invoke-RestMethod -Method Post -Uri $RequestAccessTokenUri -body $payload -ContentType 'application/x-www-form-urlencoded'

Write-Host "Print Token $Token" -ForegroundColor Green

return $Token

}

 

function getResourceGroups(){

$Token = getToken

# Get Azure Resource Groups

$ResourceGroupApiUri = "https://management.azure.com/subscriptions/$SubscriptionId/resourcegroups?api-version=2017-05-10"

$Headers = @{}

$Headers.Add("Authorization","$($Token.token_type) $($Token.access_token)")

$ResourceGroups = Invoke-RestMethod -Method Get -Uri $ResourceGroupApiUri -Headers $Headers

Write-Host "Print Resource groups $ResourceGroups" -ForegroundColor Green


return $ResourceGroups

}

Write-Host TenantId=$TenantId SubscriptionId=$SubscriptionId ClientId=$ClientId # deliberately not echoing the client secret

getResourceGroups

 

<# Sample output

value

-----

{@{id=/subscriptions/<obfuscated>/resourceGroups/cloud-shell-storage-southcentralus; name=cloud-shell-storage-southcentralus; location=southcentralus; properties=}, @{id=/subscriptions/<obfuscated>/resourceGroups/SFQuickstartRG; name=SFQuickstartRG; location=southc...

 

#>


Sunday, August 1, 2021

 

Azure Reservation Management:

 

Introduction: This article is a continuation of a series of articles on Azure services, beginning with the SignalR article written earlier. In this article, we explore Azure resource reservations, which are available under cost management and billing. One of the advantages of using the Azure public cloud is that it provides complete visibility into the billing and cost aspects of requesting resources from its portfolio of services, and usage is continuously monitored, which helps both plan and predict future expenses. Using the Microsoft cloud, businesses can trim their inefficiencies and analyze, manage, and optimize costs on a workload-by-workload basis. This suite of services helps ensure that the organization can take advantage of the benefits provided by the cloud. Some of the cost management and billing features include administrative tasks such as paying the bill, managing billing access to costs, downloading cost and usage data, proactively applying data analysis to costs, setting spending thresholds, and identifying opportunities for workload changes, among many others.

Azure cost management best practice involves a virtuous cycle of visibility, accountability, and optimization in saving costs. This cycle is better understood by reviewing the actions that can be taken on the billing account. When the billing account is created at the time of signing up for Azure, it begins to accumulate invoices, payments, and cost-tracking measurements. There can be more than one billing account. Some accounts begin with the pay-as-you-go billing model; it accounts for resource usage and allows users the option to terminate resources when a threshold is exceeded. Other accounts fall under enterprise and customer agreements, which are typically signed business-to-business or, in the latter case, when the organization signs up for a customer agreement with Microsoft. Billing differs from cost management altogether. While billing is the process of invoicing customers for goods or services and managing the commercial relationship, cost management is an operational practice: it identifies costs and usage patterns, which can be surfaced with advanced analytics and reports based on the negotiated prices, and it factors in reservations and discounts. Reports on internal and external costs, based on usage and marketplace charges, can be presented collectively via the cost management features. These reports help drill down on spending as well as break it out under different categories. Some predictive analytics are also available, which help identify the resources that cost more than others. One such feature is a reservation. An Azure resource reservation helps save money by committing to a one-year or three-year plan for multiple products. This commitment earns a discount on the resources regardless of their usage; it can significantly reduce resource costs, in some cases by up to 72% off pay-as-you-go prices. When the discounts apply, they don't alter the runtime state of the resources, so a reservation is merely a billing policy. The total cost of upfront and monthly reservations is the same, and there is no extra fee for choosing to pay monthly. There are advantages to buying reservations, such as an immediate reservation discount, not being charged for resources on a continuous basis, and tolerance for usage fluctuations. Certain attributes of a reservation determine the resource to be purchased; choices between SKUs and regions (wherever applicable) and scope can change the instance being reserved. Determining what to purchase is one of the key decisions in cost management, and the reservation discount is applied on an hourly basis. While it is easy to buy reservations online via the Azure portal, the same can be done via APIs, PowerShell, SDKs, and command-line interfaces. The billing for a reservation proceeds from a subscription, but the reservation can even be applied to a different subscription. A reservation can also be split into two reservations. If a reserved virtual machine instance is purchased, a reservation discount can be applied to that resource. At the time of purchase, two objects are created: a reservation order and a reservation. Actions such as split, merge, partial refund, or exchange create new reservations under the reservation order, which can be viewed by selecting the reservation and navigating to the order ID.
The reservation scope determines the set of resources to which the reservation applies. The billing context depends on the subscription used to buy the reservation. If the reservation scope is changed from shared to single, then only the owner can select which of their subscriptions falls under the reservation scope, but Enterprise Agreement and Microsoft Customer Agreement billing contributors can manage all reservations for their organizations. There are two ways to share this privilege: first, access management can be delegated for an individual reservation order by assigning the Owner role to the individual at the resource scope of the reservation order; the other way is to add a user as a billing administrator to an agreement-backed reservation. All users go to the Azure portal to manage their costs from the cost management and billing section. There are some extended features available for self-service exchanges and refunds for Azure reservations, but the reservations must be similar for users to take advantage of these features. Exchanges can work both ways, from downsizing to upscaling, and the self-service features are available from the portal.
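Reservation management is scriptable as well. A minimal sketch, assuming the Az.Reservations module (the cmdlet shapes shown are assumptions to verify against the module's help; the split quantities are arbitrary):

# List reservation orders and the reservations under the first one
$orders = Get-AzReservationOrder
$reservations = Get-AzReservation -ReservationOrderId $orders[0].Name

# Split a reservation of quantity 5 into reservations of 3 and 2
Split-AzReservation -ReservationOrderId $orders[0].Name -ReservationId $reservations[0].Name -Quantity 3,2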