As it turns out, as of Service Fabric runtime 6.1, allowing applications to change their Default Services during an upgrade is not enabled by default. This behaviour is controlled by the 'EnableDefaultServicesUpgrade' setting in the cluster-level settings group called 'ClusterManager'. You can set it in the Cluster Manifest if you're managing your own cluster on-premises, or in an ARM template if you're deploying to an Azure-based cluster, like so:
{
  "apiVersion": "2016-09-01",
  "type": "Microsoft.ServiceFabric/clusters",
  "name": "[parameters('clusterName')]",
  "location": "[parameters('location')]",
  "dependsOn": [
    "[variables('supportLogStorageAccountName')]"
  ],
  "properties": {
    "certificate": {
      "thumbprint": "[parameters('certificateThumbprint')]",
      "x509StoreName": "[parameters('certificateStoreValue')]"
    },
    "clientCertificateCommonNames": [],
    "clientCertificateThumbprints": [],
    "clusterState": "Default",
    "diagnosticsStorageAccountConfig": {
      "blobEndpoint": "[reference(concat('Microsoft.Storage/storageAccounts/', variables('supportLogStorageAccountName')), '2017-06-01').primaryEndpoints.blob]",
      "protectedAccountKeyName": "StorageAccountKey1",
      "queueEndpoint": "[reference(concat('Microsoft.Storage/storageAccounts/', variables('supportLogStorageAccountName')), '2017-06-01').primaryEndpoints.queue]",
      "storageAccountName": "[variables('supportLogStorageAccountName')]",
      "tableEndpoint": "[reference(concat('Microsoft.Storage/storageAccounts/', variables('supportLogStorageAccountName')), '2017-06-01').primaryEndpoints.table]"
    },
    "fabricSettings": [
      {
        "parameters": [
          {
            "name": "ClusterProtectionLevel",
            "value": "[parameters('clusterProtectionLevel')]"
          }
        ],
        "name": "Security"
      },
      {
        "parameters": [
          {
            "name": "EnableDefaultServicesUpgrade",
            "value": "[parameters('enableDefaultServicesUpgrade')]"
          }
        ],
        "name": "ClusterManager"
      }
    ],
    "managementEndpoint": "[concat('https://',reference(variables('lbIPName')).dnsSettings.fqdn,':',variables('nt0fabricHttpGatewayPort'))]",
    "nodeTypes": [
      {
        "name": "[variables('vmNodeType0Name')]",
        "applicationPorts": {
          "endPort": "[variables('nt0applicationEndPort')]",
          "startPort": "[variables('nt0applicationStartPort')]"
        },
        "clientConnectionEndpointPort": "[variables('nt0fabricTcpGatewayPort')]",
        "durabilityLevel": "Bronze",
        "ephemeralPorts": {
          "endPort": "[variables('nt0ephemeralEndPort')]",
          "startPort": "[variables('nt0ephemeralStartPort')]"
        },
        "httpGatewayEndpointPort": "[variables('nt0fabricHttpGatewayPort')]",
        "isPrimary": true,
        "vmInstanceCount": "[parameters('nt0InstanceCount')]"
      }
    ],
    "provisioningState": "Default",
    "reliabilityLevel": "Silver",
    "upgradeMode": "Automatic",
    "vmImage": "Windows"
  },
  "tags": {
    "resourceType": "Service Fabric",
    "displayName": "IoT Service Fabric Cluster",
    "clusterName": "[parameters('clusterName')]"
  }
}
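If the cluster already exists, the same setting can also be flipped from PowerShell rather than redeploying the whole template. A minimal sketch, assuming the AzureRM.ServiceFabric module is installed and you're already logged in (resource group and cluster names are placeholders):

$resourceGroupName = 'myresourcegroup'
$clusterName = 'mycluster'

# Adds or updates the parameter in the ClusterManager section and rolls it
# out to the cluster as a configuration upgrade.
Set-AzureRmServiceFabricSetting -ResourceGroupName $resourceGroupName -Name $clusterName `
    -Section 'ClusterManager' -Parameter 'EnableDefaultServicesUpgrade' -Value 'true'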
Sunday, August 13, 2017
Azure Automation does not support the Process{} block
There's an undocumented bug in Azure Automation: it does NOT support the Process { } block, i.e. the block you would use when creating scripts or modules that need to support pipeline input in your commands.
I discovered this after much trial and error trying to run a runbook and having it silently and mysteriously fail with no output whatsoever, no matter what verbosity and progress settings I enabled.
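To make the failure concrete, here's a minimal sketch (hypothetical runbook, placeholder names). The first shape never executes in Azure Automation; the second, functionally equivalent shape works:

# A hypothetical runbook body that silently fails in Azure Automation,
# because the Process { } block never executes:
#
#     param([Parameter(ValueFromPipeline = $true)][string[]]$Names)
#     Process { Write-Output "Processing $_" }
#
# A workaround sketch: accept the input as an ordinary parameter and iterate
# in the main script body instead of relying on pipeline semantics.
param([string[]]$Names)

foreach ($name in $Names)
{
    Write-Output "Processing $name"
}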
Re-adding a Hybrid Runbook Worker to an Automation Account runbook worker group
If you receive the error: Add-HybridRunbookWorker : Machine is already registered to different account
Simply delete the key under: HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\HybridRunbookWorker and try the command again.
Thanks go to Wayne Hoggett for pointing this out; this post reprints his fix for posterity.
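In script form, the whole fix looks something like this (a sketch; the registration arguments are placeholders for whatever you used originally, and the parameter names are per the HybridRegistration module at the time of writing):

# Remove the stale Hybrid Runbook Worker registration key...
Remove-Item -Path 'HKLM:\SOFTWARE\Microsoft\HybridRunbookWorker' -Recurse -Force

# ...then retry the registration against the desired Automation account.
Add-HybridRunbookWorker -GroupName 'MyWorkerGroup' -EndPoint '<automation-account-registration-url>' -Token '<primary-access-key>'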
Tuesday, July 18, 2017
Adding Azure Active Directory OAuth 2.0 authentication to a Service Fabric Web API (Stateless) service
... is pretty much the same as adding it to a normal Web API 2.0 application:
[Authorize]
public class ValuesController : ApiController
{
}
Then in your Startup.cs file:
// This code configures Web API. The Startup class is specified as a type
// parameter in the WebApp.Start method.
public static void ConfigureApp(IAppBuilder appBuilder)
{
    CodePackageActivationContext activationContext = FabricRuntime.GetActivationContext();
    ConfigurationPackage configurationPackageObject = activationContext.GetConfigurationPackageObject("Config");
    ConfigurationSection configurationSection = configurationPackageObject.Settings.Sections["ActiveDirectoryServiceConfigSection"];

    appBuilder.UseWindowsAzureActiveDirectoryBearerAuthentication(
        new WindowsAzureActiveDirectoryBearerAuthenticationOptions
        {
            Tenant = configurationSection.Parameters["TenantName"].Value,
            TokenValidationParameters = new TokenValidationParameters
            {
                ValidAudience = configurationSection.Parameters["AppIdUri"].Value
            },
            Provider = new OAuthBearerAuthenticationProvider
            {
                OnValidateIdentity = OnValidateUserIdentityAsync
            }
        });

    // Configure Web API for self-host.
    HttpConfiguration config = new HttpConfiguration();
    config.Routes.MapHttpRoute(
        name: "DefaultApi",
        routeTemplate: "api/{controller}/{id}",
        defaults: new { id = RouteParameter.Optional }
    );
    appBuilder.UseWebApi(config);
}
The trick here is to **ENSURE THAT WAAD BEARER AUTHENTICATION GETS REGISTERED BEFORE REGISTERING WEB API!!!**
Sunday, July 16, 2017
Getting Started with deploying your first Azure Service Fabric resource
I've recently started getting on the Service Fabric bandwagon, running in Azure in my case. When you run Service Fabric in Azure (vs. on-prem), things are a little bit different. Instead of running your own management tooling on some machine (most likely virtual), it's provided for you as a resource in Azure Resource Manager. To get started with Service Fabric in Azure, I kicked things off by running the ARM template available in the Azure quickstart templates on GitHub.
I started off by putting the template into source control and then creating a VSTS build and automated release for the template. Before you can run the template as it is on Github (at the time of this writing) you'll need to have the following pre-requisites:
- A pre-existing key vault
- An X509 certificate populated in that key vault, stored as a .PFX file with a password to secure it.
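Those pre-requisites can also be staged from PowerShell; a hedged sketch with the AzureRM cmdlets (vault name, resource group, location, file path, and password are all placeholders):

# -EnabledForDeployment is required so that the VM scale set is allowed to
# retrieve the certificate from the vault during deployment.
New-AzureRmKeyVault -VaultName 'my-sf-vault' -ResourceGroupName 'my-sf-rg' -Location 'westus' -EnabledForDeployment

# Upload the password-protected .PFX into the vault.
$password = ConvertTo-SecureString -String '<pfx password>' -AsPlainText -Force
Import-AzureKeyVaultCertificate -VaultName 'my-sf-vault' -Name 'sf-cluster-cert' -FilePath 'C:\certs\sf-cluster.pfx' -Password $password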
In my case, I created a separate resource group template to set all that up and stage it before executing the resource group template for the Service Fabric manager and its associated VMs that provide the nodes for the Fabric.
Once the templates were successfully executed and I had my resources created, I discovered a few more things that needed to be done before an App could be deployed to the fabric:
- In the Virtual Machine Scale Set that gets created by the template, you have to go turn the VMSS on in the portal! **It's not turned on by default!**
- As soon as the VMSS is turned on for the first time and starts connecting to the Service Fabric, it immediately starts an upgrade to the latest version of the Fabric. DO NOT TURN OFF THE VMSS DURING THIS TIME, OR YOU'LL HAVE TO START OVER. You can track the progress of the upgrade during this initial start-up using the following PowerShell snippet. As soon as the 'ClusterState' is 'Ready', you can start executing other operations:
$resourceGroupName = 'myresourcegroup'
$clusterName = 'mycluster'
Get-AzureRmServiceFabricCluster -ResourceGroupName $resourceGroupName -Name $clusterName | select -Property Name,ClusterState,ManagementEndpoint
- In order to connect to the Service Fabric, you need to first specify a certificate for Admin clients to connect to the Fabric. You can do this with a command similar to the following (see also the connection sketch at the end of this list):
Add-AzureRmServiceFabricClientCertificate -ResourceGroupName $resourceGroupName -Name $clusterName -Thumbprint ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEF3A9A -Admin
- NOTE: DON'T specify the same certificate as both an Admin certificate and as a read-only certificate, otherwise it can confuse the cluster + browser and prevent you from being able to log in as an Administrator!
- In order to log into Service Fabric Explorer via Chrome, you'll need to ensure that you explicitly import your client certificate into Chrome's certificate store, AND configure it for Client Authentication!
- Updating the user configuration of the Fabric by doing things like adding certificates CAN TAKE AN ABSURDLY LONG TIME because adding a certificate requires publication of that certificate OUT TO EACH NODE IN THE FABRIC. And it seems like they don't do it in parallel! That's why you should shorten the timeouts associated with operations in the Fabric. You can do this via the Service Fabric Cluster resource by going to the 'Fabric upgrades' tab in the blade -> 'Advanced upgrade settings'
- The default port in the Service Fabric Service project item in Visual Studio is http:8529. The default port for the load balancer in the Service Fabric cluster template is http:80. See a problem here? You'll have to change one or the other to ensure they match up so that requests to your load-balancer front-end can actually get through to the machines in your cluster!
- The very next thing you should do after sanity checking your application to make sure it's correctly configured for communications ... secure the fucking thing! Now that you've proven that you can connect via port 80 and everything's mapped correctly, disable it! Move to port 443 and secure all of your requests by default! Not a single thing should go between clients and your cluster unencrypted! Additionally, all traffic between you and your clients should, as a matter of best practice, use message-based encryption wherever possible. See this article. As a hint, you should replace the HTTP mapping rule in your Resource Group template with an HTTPS mapping rule in the load balancer.
- Now that you've got a simple endpoint, start adding authentication and authorization and make sure you're allowing people to do only exactly that which you want them to do! TODO: include link for Service Fabric authentication and authorization!
- If you haven't already, you should absolutely set up automated releases via VSTS (if that's your tool of choice). Ensure that you've made your Service Fabric application upgradeable by placing the following in your Cloud.xml publish profile (or whichever publish profile you may be using):
<UpgradeDeployment Mode="Monitored" Enabled="true">
    <Parameters FailureAction="Rollback" Force="True" />
</UpgradeDeployment>
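And the connection sketch promised above: once the admin client certificate is registered with the cluster and imported into your local certificate store, connecting from PowerShell looks something like this (assumes the Service Fabric SDK's PowerShell module; the endpoint and thumbprint are placeholders, and 19000 is the client connection port typically used by the quickstart template):

$endpoint = 'mycluster.westus.cloudapp.azure.com:19000'
$thumbprint = 'ABCDEFABCDEFABCDEFABCDEFABCDEFABCDEF3A9A'

# Authenticate with the client certificate from the CurrentUser\My store.
Connect-ServiceFabricCluster -ConnectionEndpoint $endpoint `
    -X509Credential `
    -ServerCertThumbprint $thumbprint `
    -FindType FindByThumbprint -FindValue $thumbprint `
    -StoreLocation CurrentUser -StoreName My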
Wednesday, July 12, 2017
Some interesting points regarding automated deployment of Alert Rules within an Azure Resource Group template
After some recent endeavours to add Application Insights alert rules, I stumbled (quite sorely) over some quirks around defining Alert Rules within an ARM template:
1) When copying an existing definition out of the Azure portal, there will be a "hidden-link:..." prefixed tag in the tags section of the resource definition. Normally tags are simply extra metadata, but in the case of Application Insights and its related artifacts, the "hidden-link:" tag is actually a functional requirement. You can't delete it! Otherwise, the ARM template deployment will throw a very unhelpful 'Microsoft.WindowsAzure.Management.Monitoring.MonitoringServiceException' with no further details.
2) When defining Alert Rules on custom metrics, those metrics must *already* exist within the target Application Insights instance. Otherwise, as above, the ARM template deployment will throw a very unhelpful 'Microsoft.WindowsAzure.Management.Monitoring.MonitoringServiceException' with no further details. The consequence of this is that you won't be able to define such alert rules when deploying a new resource group from the ARM template for the first time; the resource group deployment step will simply fail.
Friday, January 27, 2017
Solving "The storage account named XXXXX already exists under the subscription" when deploying Azure Resource Group templates
I've recently run into a problem where I've been unable to deploy multiple resource groups for my applications that contain Storage Accounts. When executing the Resource Group template deployment, I get the following error:
"The storage account named XXXXX already exists under the subscription"
The underlying cause seems to be that Microsoft fucked up backward compatibility on their API in resource group templates. Thanks to this blog post, I was able to get back up and running. The gist of the article, repeated here for posterity, is that you have to update to the latest (as of this writing) API version of 2016-01-01. For example:
"The storage account named XXXXX already exists under the subscription"
The underlaying cause seems to be that Microsoft fucked up backward compatibility on their API in resource group templates. Thanks to this blog post, I was able to get back up and running. The gist of the article, repeated here for posterity, is that you have to update to the latest (as of this writing) API version of 2016-01-01. For example:
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "resources": [
    {
      "name": "dnntest20160705",
      "type": "Microsoft.Storage/storageAccounts",
      "location": "[resourceGroup().location]",
      "apiVersion": "2016-01-01",
      "dependsOn": [ ],
      "tags": {
        "displayName": "MyStorageAccount"
      },
      "sku": {
        "name": "Standard_LRS"
      },
      "kind": "Storage"
    }
  ]
}
Wednesday, November 09, 2016
Solving "System.Net.WebException: The remote server returned an error: (417) Expectation Failed" with a WCF service
I recently started getting the following error message when trying to connect to a web service we had put on an Azure VM running behind an Azure load balancer:
System.ServiceModel.ProtocolException: The remote server returned an unexpected response: (417) Expectation Failed. ---> System.Net.WebException: The remote server returned an error: (417) Expectation Failed.
It turns out the fix was to put the following element in my <configuration>:
<system.net>
    <settings>
        <!-- This is required when running in Azure VMs behind an Azure load balancer -->
        <servicePointManager expect100Continue="false" />
    </settings>
</system.net>
Friday, October 07, 2016
Passing parameters from a resource group template in Azure to a WebJob
As it turns out, WebJobs retrieve their connectionStrings and appSettings from the Azure app service blade settings, same as the web application in which they're running! You can just use the built-in "connectionstrings" and "appsettings" resources underneath a Web Application in your Resource group template to populate these values for the WebJobs right from your template. This is particularly useful for things like WebJobs dashboard and storage connection strings. Microsoft could really have done a better job of advertising this fact.
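If you're adjusting things outside of a template deployment, the same values can also be pushed with PowerShell. A sketch, assuming the AzureRM.Websites module (app, group, and setting values are placeholders):

# WebJobs will pick these up from the web app's configuration, exactly like
# the template-deployed values.
Set-AzureRmWebApp -ResourceGroupName 'myresourcegroup' -Name 'mywebapp' `
    -ConnectionStrings @{
        AzureWebJobsDashboard = @{ Type = 'Custom'; Value = '<storage connection string>' }
        AzureWebJobsStorage   = @{ Type = 'Custom'; Value = '<storage connection string>' }
    }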
Friday, August 26, 2016
Unable to add multiple listen rule keys or subscriptions to a Topic in an Azure Service Bus in an Azure Resource Group template
I recently ran across a problem wherein I was unable to add multiple Listen rules for my applications to my Topics in Azure Service Bus within an Azure Resource Group template. I would run my template and it would fail to create the keys for the Topic or the Subscription for the Topic if there was more than one of either of those for the Topic. It wasn't even consistent: it would switch back and forth between them between runs of the template. After a discussion with one of the Solution Architects at Microsoft, I found out there is, at the time of this writing, a bug in the Azure Resource Manager for Topics within Service Buses that prevents Topic access rules and subscriptions from being created simultaneously. As a workaround, I was instructed to introduce an artificial dependency between each of the subscriptions to force the resource manager to create them serially rather than in parallel, and this did the trick.
For example:
// TODO:
Tuesday, June 28, 2016
Building a developer test lab with Test Controllers and Agents in VSTS
Microsoft has recently released their DevTest Lab resources to production in Azure. This great functionality lets you quickly build up a lab where you can test your products and then tear it down.
Steps:
- Create an Azure Resource Group project in Visual Studio for managing your DTL.
- Add a Virtual Network resource to the template
- Add a DevTest Lab resource to the template
- In the portal, bind the virtual network to the DTL in the Settings tab of the DTL
- Create a Virtual Machine to host your software. Use a Formula to install any pre-requisites that you want on the machine: e.g. Chrome, Notepad++, etc.
- As a pre-requisite for connecting to a vNext workflow, you **MUST** have the Azure PowerShell cmdlets installed on the machine as part of the Formula for the VM
- On the virtual machine, open a PowerShell console and enable remoting along with adding a corresponding firewall rule to remove the local-subnet-access-only restriction that's set by default:
PS> Enable-PSRemoting
PS> Set-NetFirewallRule -Name "WINRM-HTTP-In-TCP-PUBLIC" -RemoteAddress Any -LocalPort 5986
PS> Set-ExecutionPolicy RemoteSigned
PS> dir WSMan:\localhost\listener\*\Port # show the port on which WSMan is currently listening
PS> winrm set winrm/config/Listener?Address=*+Transport=HTTP '@{Port="5986"}' # Change the port on which WSMan runs, option 1
PS> Set-Item WSMan:\localhost\listener\*\Port 5986 # Change the port on which WSMan runs, option 2
- Configure the machine as per the tools here, which is essentially the same as the above steps, with some extra ones as well: https://github.com/Azure/azure-quickstart-templates/tree/master/201-vm-winrm-windows
- From another machine, run the following to ensure that your machine is connectable:
Test-WSMan -ComputerName mymachinedns.westus.cloudapp.azure.com -Port 5986
$sessionOpt = New-PSSessionOption -SkipCACheck
$session = New-PSSession -ComputerName [myvmname].westus.cloudapp.azure.com -Credential (Get-Credential) -Port 5986 -UseSSL -SessionOption $sessionOpt
Wednesday, June 15, 2016
Using the OAuth 2.0 configuration of HTTP Client Connectors with Dell Boomi (useful for Azure PaaS Web Applications that use Azure AD)
My company recently started using Dell's Boomi platform to connect to some of our PaaS applications running in Azure that use Azure Active Directory for authentication. We had tried previously to get the OAuth 2.0 security settings on the HTTP Client Connector working to no avail. Due to some of the work we have coming up for which we really wanted to be able to use the OAuth 2.0 configured connectors, I decided to try again, and got it working. Here's what I had to do in order to use an HTTP Client Connector with Azure AD:
- PRECONDITIONS:
- Must have two Web Applications defined in Azure AD: client application AppC, and service application AppS
- AppC must be setup for the following:
- Have an App Key defined
- In the manifest, must set to true the following settings: oauth2AllowImplicitFlow, oauth2AllowUrlPathMatching
- On the "Settings" tab of your HTTP Client, do the following:
- "URL" => The URL of the API to which you want to connect (underneath AppS).
- "Authentication Type" => OAuth 2.0
- "Client ID" => The Client ID of an Azure AD **Web Application** registered in Azure AD, that has **ALREADY BEEN PRECONFIGURED FOR ACCESS TO YOUR SERVICE**. This can be copied and pasted from the Azure web portal AD application page for your application. This is the Client ID in the configuration page for AppC.
- "Client Secret" => The App Key (in Azure terminology) of the Azure AD **Web Application** registered in Azure AD to be used as a client application for your service.
- "Authorization Token URL" => The "OAUTH 2.0 AUTHORIZATION ENDPOINT" copied out of the Azure AD "Applications" tab in the Azure Management Portal. This is the App Key (at least one of them anyway) for AppC in the Azure AD Management Portal.
- "Access Token URL" => The "OAUTH 2.0 TOKEN ENDPOINT" copied out of the Azure AD "Applications" tab in the Azure Management Portal.
- Under the "Add Authorization Parameter" link, you'll need to add 2 parameters. Click on the "Add Authorization Parameter" link twice to add them. You'll need the following for parameters:
- "grant_type" => "client_credentials"
- "resource" => The App ID URI of the target Web Application registered in Azure AD acting as the service to which your client Web Application is connecting. This is the App ID Uri of AppS.
- In the Azure AD portal for your Client Web Application, you'll need to add the OAuth callback URL for your Boomi account to the "Reply URLs" list for AppC. e.g. https://platform.boomi.com/account/[companyaccountname-11X11X]/oauth2/callback
- At the bottom of the page, click on the "Generate" button next to the "Access Token" label. Boomi will now attempt to connect to Azure AD. To do this, it will open up a new web page and attempt to authorize, so ensure that you have any pop-up blockers either turned off or configured to allow platform.boomi.com to open pop-ups.
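Under the hood, those authorization parameters amount to a standard client-credentials token request, so you can sanity-check the AppC/AppS configuration outside of Boomi. A hedged PowerShell sketch (tenant, client id, app key, and App ID URI are placeholders):

$tenant = 'mytenant.onmicrosoft.com'
$body = @{
    grant_type    = 'client_credentials'
    client_id     = '<AppC client id>'
    client_secret = '<AppC app key>'
    resource      = 'https://mytenant.onmicrosoft.com/AppS'   # App ID URI of AppS
}

# This is the "OAUTH 2.0 TOKEN ENDPOINT" from the Azure AD "Applications" tab.
$response = Invoke-RestMethod -Method Post -Uri "https://login.microsoftonline.com/$tenant/oauth2/token" -Body $body
$response.access_token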
Friday, June 03, 2016
Implementing Application Warmup with IIS 7.5 on Windows Server 2008 R2
In order to be able to support certain features of some of our applications, we've found the need to enable application warmup. However, this isn't built in to IIS 7.5; rather, it's available as the Application Initialization module provided by Microsoft. In order to get this working, you'll need to do the following:
- Install the Application Initialization 1.0 module available from Microsoft here.
- You'll need to edit the applicationHost.config file on your server to enable Application Initialization. To do this, you'll need to open the file at %WINDIR%\System32\inetsrv\config\applicationHost.config in your text editor of choice and make the following changes:
- Find the 'application' element that you wish to enable for Application Initialization, which you can do with an XPath similar to "/configuration/system.applicationHost/sites/site/application", and once found, set the preloadEnabled="true" attribute on the XML element. Add the attribute if it's not already there.
- While on the 'application' element, take note of the 'applicationPool' value, because that's the Application Pool in which the application runs. Find the configuration node for that application pool (XPath: /configuration/system.applicationHost/applicationPools) and then set the following attributes on the <add> element for the application pool and its child 'processModel' element:
<add .... startMode="AlwaysRunning">
<processModel ... idleTimeout="0">
- In your application's web.config file, you'll need to add the following elements:
<system.webServer>
<applicationInitialization remapManagedRequestsTo="/InitializationProxyPage.txt" skipManagedModules="true" doAppInitAfterRestart="true">
<add initializationPage="/Services/MyCustomService.svc" hostName="localhost" />
</applicationInitialization>
<directoryBrowse enabled="true" />
</system.webServer>
You'll need to have the "InitializationProxyPage.txt" file in the root of your web application.
- Enable "Anonymous" authentication in the Authentication panel of your application in IIS. This is necessary so that the warmup page can successfully execute a GET request for your initialization page to kick off the warmup.
- If you use the system.webServer/security/authorization or system.webServer/security/authentication sections in your web.config to control access to the application, you'll also need to add the following under the /configuration element as a sibling to the system.webServer element:
<configuration>
  <location path="InitializationProxyPage.txt">
    <system.webServer>
      <security>
        <authorization>
          <clear />
          <add accessType="Allow" users="*"/>
        </authorization>
      </security>
    </system.webServer>
  </location>
</configuration>
This will grant explicit access to the InitializationProxyPage.txt file to all users (including the App Pool Identity user) so that application warmup is guaranteed to have access to that file and can bootstrap your services.
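If you'd rather script those applicationHost.config and application pool changes than hand-edit the file, here's a sketch using the WebAdministration module (the site and pool names are placeholders):

Import-Module WebAdministration

# Equivalent of setting preloadEnabled="true" on the site's <application> elements.
Set-ItemProperty 'IIS:\Sites\Default Web Site' -Name applicationDefaults.preloadEnabled -Value $true

# Equivalent of startMode="AlwaysRunning" and idleTimeout="0" on the app pool.
Set-ItemProperty 'IIS:\AppPools\MyAppPool' -Name startMode -Value AlwaysRunning
Set-ItemProperty 'IIS:\AppPools\MyAppPool' -Name processModel.idleTimeout -Value ([TimeSpan]::Zero)

Tuesday, April 12, 2016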
Using ADAL.js correctly with AngularJS when setting up your endpoints
When searching around and using the tutorials on how to correctly use adal.js to authenticate your calls to Web API (or anything else) in Azure, you'll often see a block similar to this that you have to put in your App.js to configure your main module:
adalAuthenticationServiceProvider.init(
    {
        tenant: 'mytenant.onmicrosoft.com',
        clientId: 'abc4db9b-9c54-4fdf-abcd-1234ec148319',
        endpoints: {
            'https://localhost:44301/api': 'https://some-app-id-uri/'
        }
    },
    $httpProvider
);
You'll notice that this appears to be pointing to the Web API root of a service running on localhost, and you'd be right. For this to work correctly, you'll need to enable OAuth 2 path matching (the oauth2AllowUrlPathMatching manifest setting) in the application manifest of the **client** that's connecting to the service!
Wednesday, March 30, 2016
Running and debugging Azure WebJobs locally on your development machine
Check out this article: https://github.com/Azure/azure-webjobs-sdk/wiki/Running-Locally
It shows you how to locally run and debug Azure WebJobs, which as it turns out is extremely handy because you can interact with all the Queues, Tables, Blobs etc as you normally would and get a full debugging environment.
Updating your Azure AD Application Manifest
We've recently found that as we develop more applications in Azure, we need to put safeguards on the deployment of the applications to ensure that they're configured correctly. Part of this means editing the application manifests to ensure that certain settings are always enforced on certain applications. Here's what Microsoft has to say about updating Application manifests.
Bottom line, if you want to automate anything to do with manifests, you'll have to write your own application to use the Azure Graph API libraries to retrieve the manifest / application settings and edit them.
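For the curious, here's a hedged sketch of the kind of thing such an application boils down to, expressed as raw calls against the Azure AD Graph API from PowerShell (tenant, object id, and token acquisition are placeholders; api-version 1.6 was current at the time of writing):

$tenant   = 'mytenant.onmicrosoft.com'
$objectId = '<application object id>'
$headers  = @{ Authorization = 'Bearer <token with rights to the AAD Graph API>' }
$uri      = "https://graph.windows.net/$tenant/applications/${objectId}?api-version=1.6"

# Read the current application (manifest) settings.
$app = Invoke-RestMethod -Method Get -Uri $uri -Headers $headers

# Enforce a setting, then write it back.
$patch = @{ oauth2AllowImplicitFlow = $true } | ConvertTo-Json
Invoke-RestMethod -Method Patch -Uri $uri -Headers $headers -Body $patch -ContentType 'application/json'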
Sunday, February 28, 2016
Properly invoking scheduled WebJobs
Recently we've found the need to start using Scheduled Azure WebJobs. However, the examples out there are all garbage, even in the case where you can find an actual example using a scheduled WebJob rather than a continuous WebJob. So, for the benefit of anyone interested, including future me, here's the proper way to invoke a Scheduled WebJob in the entry point of the WebJobs assembly:
/// <summary>
/// The main entry point to the scheduled webjobs.
/// </summary>
public class Program
{
    /// <summary>
    /// Main entry point for the scheduled webjobs
    /// </summary>
    public static void Main()
    {
        IKernel kernel = new StandardKernel();
        kernel.Load(new ServicesScheduledWebJobsNinjectModule());

        var jobHostConfiguration = new JobHostConfiguration
        {
            JobActivator = new ServicesScheduledWebJobsActivator(kernel),
            DashboardConnectionString = ConfigurationManager.ConnectionStrings["AzureWebJobsDashboard"].ConnectionString,
            StorageConnectionString = ConfigurationManager.ConnectionStrings["AzureWebJobsStorage"].ConnectionString,
        };

        var host = new JobHost(jobHostConfiguration);

        // Must ensure that we call host.Start() to actually start the job host. Must do so in
        // order to ensure that all jobs we manually invoke can actually run.
        host.Start();

        // The following code will invoke all functions that have a 'NoAutomaticTriggerAttribute'
        // to indicate that they are scheduled methods.
        foreach (MethodInfo jobMethod in typeof(Functions).GetMethods().Where(m => m.GetCustomAttributes<NoAutomaticTriggerAttribute>().Any()))
        {
            try
            {
                host.CallAsync(jobMethod).Wait();
            }
            catch (Exception ex)
            {
                Console.Error.WriteLine("Failed to execute job method '{0}' with error: {1}", jobMethod.Name, ex);
            }
        }
    }
}

What the above does is the following:
- Configures the JobHost to use a dependency injection container via a custom IJobActivator implementation that, in our case, uses the Ninject dependency injection container.
- Configures the JobHost with a custom configuration so that we can control various items, including the connection strings for the dashboard and jobs storage.
- Starts the JobHost. This bit is important, because all the other examples out there neglect that this needs to be done.
- Dynamically resolves all schedulable methods that should be invoked, using the NoAutomaticTriggerAttribute built in to the WebJobs SDK. This attribute is used internally by the SDK to determine which methods need to be invoked manually (i.e. on demand) rather than by the continuous invocation used by Continuous WebJobs.
Continuous Delivery of Azure Web Services
See this:
https://azure.microsoft.com/en-us/documentation/articles/cloud-services-dotnet-continuous-delivery/
Sunday, February 21, 2016
Debugging Azure web apps on localhost
To get this working, you need to have an app registered in Azure AD with your localhost app root as the redirect URI and login URI. Here's the really important part if you're using AAD authentication:
You need to disable all forms of Authorization in IIS and enable Anonymous authentication for the application in the IIS manager on the web app itself so that Azure AD can take over the authentication!
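If you'd rather flip those IIS switches from PowerShell than through the manager UI, here's a sketch using the WebAdministration module ('Default Web Site/MyApp' is a placeholder):

Import-Module WebAdministration

# Enable anonymous authentication for the application...
Set-WebConfigurationProperty -Filter 'system.webServer/security/authentication/anonymousAuthentication' `
    -PSPath 'IIS:\' -Location 'Default Web Site/MyApp' -Name enabled -Value $true

# ...and disable Windows authentication so that Azure AD can take over.
Set-WebConfigurationProperty -Filter 'system.webServer/security/authentication/windowsAuthentication' `
    -PSPath 'IIS:\' -Location 'Default Web Site/MyApp' -Name enabled -Value $false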