Wednesday 26 August 2015

Script to Generate Sample Data for a Given Table Name

create proc GENERATEQUERYSampleData (@TBL varchar(40))
as
begin
declare @para varchar(max)= 'DECLARE @count INT
SET @count = 1
SET NOCOUNT ON ;

WHILE @count <= 100000
BEGIN

INSERT INTO ';

select @para +=TABLE_SCHEMA+'.'+TABLE_NAME +'('
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL

select @para += cl.COLUMN_NAME+','
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=0 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL;

SET @para = LEFT(@para,LEN(@para)-1);
SET @para += ')' +char(10)+' select ';


select @para += case
        when cl.DATA_TYPE in ('bit')
        then ' CASE WHEN DATEPART(MILLISECOND, GETDATE()) >= 500 THEN 0 ELSE 1 END '
        when cl.DATA_TYPE in ('bigint','smallint','tinyint','int','numeric')
        then '(ABS(CHECKSUM(NEWID())) % 1231) + ((ABS(CHECKSUM(NEWID())) % 634) * 1)'
        when cl.DATA_TYPE in ('decimal','float','money')
        then '(ABS(CHECKSUM(NEWID())) % 100001) + ((ABS(CHECKSUM(NEWID())) % 100001) * 0.00001) '
        when cl.DATA_TYPE ='date'
        then 'dateadd(YEAR, (ABS(CHECKSUM(NEWID())) % 1000),dateadd(DAY, (ABS(CHECKSUM(NEWID())) % 10) , dateadd(MONTH, (ABS(CHECKSUM(NEWID())) % 10) , cast (GETDATE() as date ))))'
        when cl.data_type ='datetime'
        then 'DATEADD(MILLISECOND, (ABS(CHECKSUM(NEWID())) % 6000) * -1, DATEADD(MINUTE, (ABS(CHECKSUM(NEWID())) % 1000000) * -1, GETDATE()))'
        when cl.Data_type in ('char','nchar','nvarchar','varchar','binary','varbinary')
        then dbo.GetString(CHARACTER_MAXIMUM_LENGTH)
        end +','
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=0 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL;

SET @para = LEFT(@para,LEN(@para)-1)
SET @para += char(10)+char(10)+'SET @count += 1
END'

print  @para

end


--------------------------

CREATE function GetString (@Type varchar(20))
returns varchar(500)
as
begin


declare @q varchar(500);
set @Type =lTrim(rtrim(@Type))
if(@Type =1)
begin

set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) '

end
else if(@Type =2)
begin

set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)'

end
else if(@Type =3)
begin

set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) '

end
else if(@Type =4)
begin

set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) +CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)'

end
else if(@Type =5)
begin

set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) +CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+ CHAR((ABS(CHECKSUM(NEWID())) % 26)+ 97) '

end
else
begin


set @q= 'CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) +CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)+ CHAR((ABS(CHECKSUM(NEWID())) % 26)+ 97)  +CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97) + CHAR((ABS(CHECKSUM(NEWID())) % 26) + 97)'

end
return @q
end
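
A minimal usage sketch (the table name is a hypothetical example, and both objects are assumed to be in the dbo schema): create the GetString helper function first, then the procedure above, and execute the procedure for a table in the current database. The procedure only prints the generated 100,000-row insert loop, so review the output and run it separately.

EXEC GENERATEQUERYSampleData 'Employee';

-- The helper on its own returns a T-SQL expression that concatenates random lower-case characters,
-- one CHAR(...) per character for lengths 1 to 5; any other length falls back to 7 characters.
SELECT dbo.GetString('3') AS RandomStringExpression;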


Tuesday 25 August 2015

Script to create a stored procedure that disables (soft-deletes) a row, given a table name, in SQL Server

CREATE PROC [dbo].[GENERATEQUERYDel]
@TBL VARCHAR(50)=''
AS
BEGIN
DECLARE @CR  VARCHAR(200)='';
select @CR +='CREATE PROCEDURE '+TABLE_SCHEMA+'.Pr'+upper(left(@TBL,1))+right(@TBL,len(@TBL)-1)+'Del'
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL

declare @schema varchar(10);
select @schema =TABLE_SCHEMA
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL

declare @db varchar(10);
select @db =TABLE_CATALOG
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL


declare @par nvarchar(1000)='';
select @par+='@'+COLUMN_NAME+' '+DATA_TYPE +case
    when CHARACTER_MAXIMUM_LENGTH is null
    then ''
    when CHARACTER_MAXIMUM_LENGTH is not null
    then '('+CAST( REPLACE(CHARACTER_MAXIMUM_LENGTH,'-1','max') as varchar)+')'
    end +CHAR(10)
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=1 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL
       

declare @beb varchar(200) ='AS' +CHAR(10) +'BEGIN'+CHAR(10)+'SET ANSI_NULLS ON;'+CHAR(10)+'SET QUOTED_IDENTIFIER ON;'+CHAR(10)+'SET NOCOUNT ON;'+CHAR(10)+CHAR(10);
SET @beb+='BEGIN TRAN'+CHAR(10);
SET @beb+='BEGIN TRY'+CHAR(10) +CHAR(10);

-- Delete start

declare @update varchar(1000) ='UPDATE '+@schema+'.'+@TBL+' SET CurrentStatus=''D''';


declare @identityCol varchar(50)='';
select @identityCol=COLUMN_NAME
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=1 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL;

if(len(@identityCol) > 1)
begin
set @update +=' WHERE ';

set @update+=@identityCol+'=@'+@identityCol;

end

-- Delete end

DECLARE @END VARCHAR(50) =CHAR(10)+'COMMIT TRAN'+CHAR(10)+'END TRY'+CHAR(10);
DECLARE @CATCH VARCHAR(500) ='BEGIN CATCH'+CHAR(10)+'ROLLBACK TRAN'+CHAR(10)+CHAR(10)+CHAR(5)+CHAR(5)+CHAR(5)+'INSERT INTO ADMIN.TBLERRORlOG (CustomeError) VALUES (NULL);'+CHAR(10)+CHAR(10)+'END CATCH'+CHAR(10)+'END';

declare @h bit=0;

if exists (select 1 from INFORMATION_SCHEMA.COLUMNS where COLUMN_NAME='CurrentStatus' and TABLE_NAME=@tbl )
if( len(@par) > 1 )
begin

print @CR+CHAR(10)+@par+CHAR(10)+@beb+CHAR(10)+@update+CHAR(10)+@END+CHAR(10)+@CATCH+char(10)+'Go';

end

END
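
A minimal usage sketch, with a hypothetical table name. The procedure only prints the generated delete procedure (and only when the table has a CurrentStatus column and an identity column), so copy the printed script and run it to actually create the procedure:

EXEC [dbo].[GENERATEQUERYDel] @TBL = 'Customer';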

Script to create an insert/update stored procedure by table name in SQL Server

CREATE PROC [dbo].[GENERATEQUERY]
@TBL VARCHAR(20)=''
AS
BEGIN
DECLARE @CR  VARCHAR(200)='';
select @CR +='CREATE PROCEDURE '+TABLE_SCHEMA+'.Pr'+upper(left(@TBL,1))+right(@TBL,len(@TBL)-1)+'IU'
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL

declare @schema varchar(10);
select @schema =TABLE_SCHEMA
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL

declare @db varchar(10);
select @db =TABLE_CATALOG
from INFORMATION_SCHEMA.TABLES
where TABLE_NAME=@TBL


declare @par nvarchar(1000)='';
select @par+='@'+COLUMN_NAME+' '+DATA_TYPE +case
    when CHARACTER_MAXIMUM_LENGTH is null
    then ','
    when CHARACTER_MAXIMUM_LENGTH is not null
    then '('+CAST( REPLACE(CHARACTER_MAXIMUM_LENGTH,'-1','max') as varchar)+'),'
    end +CHAR(10)
from INFORMATION_SCHEMA.COLUMNS
where TABLE_NAME=@TBL
    AND COLUMN_NAME NOT IN  ('CreatedOn','UpdatedOn','UpdatedBy','UserId');

SET @par +='@CmdType char(1)'+char(10);
SET @par =REPLACE(@par,'@CreatedBy','@UserId')

declare @beb varchar(200) ='AS' +CHAR(10) +'BEGIN'+CHAR(10)+'SET ANSI_NULLS ON;'+CHAR(10)+'SET QUOTED_IDENTIFIER ON;'+CHAR(10)+'SET NOCOUNT ON;'+CHAR(10)+CHAR(10);
SET @beb+='BEGIN TRAN'+CHAR(10);
SET @beb+='BEGIN TRY'+CHAR(10) +CHAR(10)+'IF(@CmdType=''I'')'+char(10)+'BEGIN'+char(10);

-- insert start
declare @insert varchar(1000) ='INSERT INTO '+@schema+'.'+@TBL+'(';

select @insert+=c.name+','
from INFORMATION_SCHEMA.COLUMNS Cl
join sys.columns C
    on c.is_identity=0 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL and c.name <> 'Createdon';

SET @insert = LEFT(@insert,LEN(@insert)-1);
SET @insert+=') VALUES ('

select @insert+='@'+COLUMN_NAME+','
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=0 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL and c.name <> 'Createdon';

SET @insert = LEFT(@insert,LEN(@insert)-1);
SET @insert =REPLACE(@insert,'@CreatedBy','@UserId')
SET @insert =REPLACE(@insert,'@UpdatedBy','@UserId')
SET @insert =REPLACE(@insert,'@UpdatedOn','GETDATE()')
SET @insert+=');'+CHAR(10)+CHAR(10)+'END'+char(10)+'ELSE'+char(10)+'BEGIN'+char(10);

--insert end

-- update start

declare @update varchar(1000) ='UPDATE '+@schema+'.'+@TBL+' SET ';


select @update+=c.name+'=@'+c.name+','
from INFORMATION_SCHEMA.COLUMNS Cl
join sys.columns C
    on c.is_identity=0 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL and c.name not like 'Create%';

set @update =left(@update,len(@update)-1);

declare @identityCol varchar(50)='';
select @identityCol=COLUMN_NAME
from INFORMATION_SCHEMA.COLUMNS cl
join sys.columns C
    on c.is_identity=1 and Cl.TABLE_NAME=OBJECT_NAME(object_id) and cl.COLUMN_NAME=c.name
where cl.TABLE_NAME= @TBL;

if(len(@identityCol) > 1)
begin
set @update +=' WHERE ';

set @update+=@identityCol+'=@'+@identityCol;

end


SET @update =REPLACE(@update,'@UpdatedBy','@UserId')
SET @update =REPLACE(@update,'@UpdatedOn','GETDATE()')
set @update +=char(10)+'END'+char(10)+char(10);
-- updaate end

DECLARE @END VARCHAR(50) ='COMMIT TRAN'+CHAR(10)+'END TRY'+CHAR(10);
DECLARE @CATCH VARCHAR(500) ='BEGIN CATCH'+CHAR(10)+'ROLLBACK TRAN'+CHAR(10)+CHAR(10)+CHAR(5)+CHAR(5)+CHAR(5)+'INSERT INTO ADMIN.TBLERRORlOG (CustomeError) VALUES (NULL);'+CHAR(10)+CHAR(10)+'END CATCH'+CHAR(10)+'END';


print @CR+CHAR(10)+@par+CHAR(10)+@beb+CHAR(10)+@insert+CHAR(10)+@update+CHAR(10)+@END+CHAR(10)+@CATCH+char(10)+'Go';

END
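
Again, a minimal usage sketch with a hypothetical table name; the procedure prints a combined insert/update procedure driven by a @CmdType parameter ('I' takes the insert branch, anything else the update branch), which you can then review and execute:

EXEC [dbo].[GENERATEQUERY] @TBL = 'Customer';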

Wednesday 19 August 2015

Programmatically Create IIS Website and Application Pool Using C#

In this article, we will look into creating an IIS 7/7.5 website and the corresponding application pool programmatically using a C# console application. These activities are most commonly done by an IIS administrator; we can automate them using this application. Let's create a console application named IISAutomation in Visual Studio 2010, targeting .NET 3.5, as in the following:

console application

Let's add a reference to Microsoft.Web.Administration, which is present under C:\Windows\System32\inetsrv [the IIS installation directory]. This assembly contains classes that a developer can use to administer IIS. First we will create an application pool using the following code:
private static void CreateAppPool(string poolname, bool enable32bitOn64, ManagedPipelineMode mode, string runtimeVersion = "v4.0")
{
    using (ServerManager serverManager = new ServerManager())
    {
        ApplicationPool newPool = serverManager.ApplicationPools.Add(poolname);
        newPool.ManagedRuntimeVersion = runtimeVersion;
        newPool.Enable32BitAppOnWin64 = enable32bitOn64;
        newPool.ManagedPipelineMode = mode;
        serverManager.CommitChanges();
    }
}
Here, we created an application pool by adding a new ApplicationPool instance to the ApplicationPools collection. Then we set the properties of the application pool, such as the managed runtime version and pipeline mode, and committed the changes by calling CommitChanges(). Similarly, we will create a website using the following code:
private static void CreateIISWebsite(string websiteName, string hostname, string phyPath, string appPool)
{
    using (ServerManager iisManager = new ServerManager())
    {
        iisManager.Sites.Add(websiteName, "http", "*:80:" + hostname, phyPath);
        iisManager.Sites[websiteName].ApplicationDefaults.ApplicationPoolName = appPool;

        foreach (var item in iisManager.Sites[websiteName].Applications)
        {
            item.ApplicationPoolName = appPool;
        }

        iisManager.CommitChanges();
    }
}
Here, we created a new website by creating an instance of ServerManager, adding the site to its Sites collection, setting properties such as the name, binding, physical path and application pool, and committing the changes.

We will use the preceding methods in our Main method and create an application pool and a web site using the following code:
static void Main(string[] args)
{
    Console.WriteLine("Do you want to create an Application Pool:y/n");
    string response = Console.ReadLine();
    if (response.ToString() == "y")
    {
        Console.Write("Please enter Application Pool Name:");
        string poolname = Console.ReadLine();
        bool isEnable32bit = false;
        ManagedPipelineMode mode = ManagedPipelineMode.Classic;
        Console.Write("Need to enable 32 bit on Windows 64 bit? [Applicable for 64 bit OS] y/n:");
        string enable32bit = Console.ReadLine();
        if (enable32bit.ToLower() == "y")
        {
            isEnable32bit = true;
        }
        Console.Write("Please select Pipeline Mode: 1 for Classic, 2 for Integrated:");
        string pipelinemode = Console.ReadLine();
        if (pipelinemode.ToLower() == "2")
        {
            mode = ManagedPipelineMode.Integrated;
        }
        Console.Write("Please select Runtime Version for Application Pool: 1 for v2.0, 2 for v4.0:");
        string runtimeVersion = Console.ReadLine() == "1" ? "v2.0" : "v4.0";

        CreateAppPool(poolname, isEnable32bit, mode, runtimeVersion);
        Console.WriteLine("Application Pool created successfully...");
    }
    Console.WriteLine("Do you want to create a website:y/n");
    response = Console.ReadLine();
    if (response.ToString() == "y")
    {
        Console.Write("Please enter website name:");
        string websiteName = Console.ReadLine();
        Console.Write("Please enter host name:");
        string hostname = Console.ReadLine();
        Console.Write("Please enter physical path to point for website:");
        string phypath = Console.ReadLine();
        Console.WriteLine("Available Application Pools:");
        foreach (var pool in new ServerManager().ApplicationPools)
        {
            Console.WriteLine(pool.Name);
        }
        Console.WriteLine("");
        Console.Write("Please enter Application pool Name for web site:");
        string poolName = Console.ReadLine();
        CreateIISWebsite(websiteName, hostname, phypath, poolName);
        Console.WriteLine("Web site created successfully...");
        Console.ReadLine();
    }
}
Here, we gather the attributes necessary for website and application pool creation from console input. Let's run the application and enter the details as shown below:

run the application

Sunday 9 August 2015

SQL Server Migration Assistant (SSMA) Team's Blog

Microsoft SQL Server Migration Assistant (SSMA) v5.3 is now available.

SSMA simplifies the database migration process from Oracle, Sybase, MySQL and Microsoft Access to SQL Server and SQL Azure. SSMA automates all aspects of migration, including migration assessment analysis, schema and SQL statement conversion, data migration, and migration testing, to reduce the cost and risk of your database migration project.
The new version of SSMA, SSMA 5.3, provides the following major enhancements:
  • Support of Migration to MS SQL Server 2014.
  • Improved conversion mechanism when migrating to Azure.
  • New features in the Migration GUI.
  • No requirement to get a License key to start using SSMA.

Download SQL Server Migration Assistant (SSMA) v.5.3 :

Launch the download of the SSMA for Oracle.





The SSMA product team is available to answer your questions and provide limited technical support. Contact the team at ssmahelp@microsoft.com

Friday 7 August 2015

SQL SERVER – Find Most Expensive Queries Using DMV

The title of this post says it all for this quick blog post. I was asked in a recent query tuning consultation project if I could share the script I use to figure out the most expensive queries running on SQL Server. This script is very basic and very simple; there are many different versions available online. This basic script does the job I expect it to do: find out the most expensive queries on a SQL Server box.
SELECT TOP 10 SUBSTRING(qt.TEXT, (qs.statement_start_offset/2)+1,
((
CASE qs.statement_end_offset
WHEN -1 THEN DATALENGTH(qt.TEXT)
ELSE qs.statement_end_offset
END - qs.statement_start_offset)/2)+1),
qs.execution_count,
qs.total_logical_reads, qs.last_logical_reads,
qs.total_logical_writes, qs.last_logical_writes,
qs.total_worker_time,
qs.last_worker_time,
qs.total_elapsed_time/1000000 total_elapsed_time_in_S,
qs.last_elapsed_time/1000000 last_elapsed_time_in_S,
qs.last_execution_time,
qp.query_plan
FROM sys.dm_exec_query_stats qs
CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) qt
CROSS APPLY sys.dm_exec_query_plan(qs.plan_handle) qp
ORDER BY qs.total_logical_reads DESC -- logical reads
-- ORDER BY qs.total_logical_writes DESC -- logical writes
-- ORDER BY qs.total_worker_time DESC -- CPU time

Last Ran Query – Recently Ran Query

How many times have we wondered what the last few queries run on SQL Server were? The following quick script shows the most recently run queries along with the time each was executed, on SQL Server 2005.
SELECT deqs.last_execution_time AS [Time], dest.TEXT AS [Query]
FROM sys.dm_exec_query_stats AS deqs
CROSS APPLY sys.dm_exec_sql_text(deqs.sql_handle) AS dest
ORDER BY deqs.last_execution_time DESC

Reference : Pinal Dave (http://blog.SQLAuthority.com) , BOL – sys.dm_exec_query_stats, BOL – sys.dm_exec_sql_text

SQL SERVER – Weekly Series – Memory Lane – #010

Here is the list of selected articles of SQLAuthority.com across all these years. Instead of just listing all the articles I have selected a few of my most favorite articles and have listed them here with additional notes below it. Let me know which one of the following is your favorite article from memory lane.
Special Note: Very last link in the blog post is one of my most favorite performance tuning trick.

2006

In 2006 I started to blog and, honestly, I had no idea what blogging was. It was just a collection of bookmarks and I had a great time writing them up; I always thought I would read them when I needed them. Today, when I read my old blog posts, I feel nostalgic and also realize that I have improved a lot technically as well as professionally. One of the blog posts I wrote, about how to truncate the log file, got quite popular with DBAs and developers who had issues with growing log files. My solution was not perfect, as it broke the log chain and thereby created issues with point-in-time restore. In SQL Server 2008 the method I demonstrated to truncate the log was replaced, so I wrote a follow-up blog post on how to truncate the log file in SQL Server 2008 and later versions.

2007

The cursor is one of the most interesting subjects in the database world. I have never seen any concept more abused than the cursor in the history of SQL Server. However, there are a few cases where set theory fails and the cursor is the only right solution. In this blog post I demonstrated how to write a very fundamental cursor. Here is the updated post where I wrote a cursor against the AdventureWorks database.

2008

How many times have we wondered what the last few queries run on SQL Server were? The following quick script shows the most recently run queries along with the time each was executed, on SQL Server 2005 and later editions.
Here is the image which displays evolution of man.

2009

Identifying the longest running queries is extremely important for any server because they consume very valuable server resources. I quickly wrote down a script which produces the longest running queries in SQL Server; a performance tuning expert can identify those queries and tune them further. The query was also giving a divide-by-zero error once in a while, so I wrote a follow-up blog post where I removed the divide-by-zero bug from the Longest Running Query script.
In SQL Server there are times when a T-SQL script has to wait for some time before executing the next statement. It is quite common that developers depend on the application to handle this delay. However, SQL Server itself has a very strong time-management function, WAITFOR. Let us see two usages of the WAITFOR clause.
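
For reference, a small sketch of the two usages of WAITFOR mentioned above (the delay and clock-time values are arbitrary examples):

-- Pause this batch for 10 seconds before running the next statement
WAITFOR DELAY '00:00:10';
SELECT GETDATE() AS RanAfterDelay;

-- Pause this batch until a specific clock time is reached
WAITFOR TIME '23:30:00';
SELECT GETDATE() AS RanAtScheduledTime;
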
Every developer has been a victim of the situation where they have run a query on the wrong server or changed a configuration they did not intend to. In SQL Server 2008 Management Studio there is a feature which can change the color of the status bar per connection; this alerts the developer to which server they are running queries on.
sqlcmd has all the features which osql has to offer, and additionally sqlcmd has many more features than osql. isql was introduced in earlier versions of SQL Server, osql was introduced in SQL Server 2000, and sqlcmd was newly added in SQL Server 2005 and exposes the additional functionality that SQL Server 2005 offers.

2010

This is a straightforward script; when executed, it tells us where the data files are stored in SQL Server.
In 2010 I had the fantastic opportunity to present in front of 1100 people at Infosys. The presentation was done at a venue where the screen was enormous.
When a unique non-clustered index is created without any option, the default is IGNORE_DUP_KEY = OFF, which means that when duplicate values are inserted an error is thrown for the duplicate value. If the option is set with the syntax IGNORE_DUP_KEY = ON, inserting duplicate values does not throw an error but just displays a warning. This is an interesting blog post where I experiment with this T-SQL property.
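
A small sketch of the behaviour described above (the table and index names are hypothetical):

CREATE TABLE dbo.DupKeyDemo (ID INT NOT NULL);

-- Default behaviour (IGNORE_DUP_KEY = OFF): the second insert raises a duplicate key error
CREATE UNIQUE NONCLUSTERED INDEX IX_DupKeyDemo_ID ON dbo.DupKeyDemo (ID);
INSERT INTO dbo.DupKeyDemo VALUES (1);
INSERT INTO dbo.DupKeyDemo VALUES (1);   -- error: duplicate key

-- With IGNORE_DUP_KEY = ON, the duplicate row is skipped and only a warning is shown
DROP INDEX IX_DupKeyDemo_ID ON dbo.DupKeyDemo;
CREATE UNIQUE NONCLUSTERED INDEX IX_DupKeyDemo_ID ON dbo.DupKeyDemo (ID) WITH (IGNORE_DUP_KEY = ON);
INSERT INTO dbo.DupKeyDemo VALUES (2);
INSERT INTO dbo.DupKeyDemo VALUES (2);   -- warning: duplicate key was ignored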

2011

The year 2010 was a fantastic year in my career. I had a great time traveling to many countries on business trips and learned a lot from various cultures. Additionally, this provided me an opportunity to meet the local communities and share learnings.
  • Singapore (twice)
  • Malaysia (twice)
  • Sri Lanka (thrice)
  • Nepal (once)
  • United States of America (twice)
  • United Arab Emirates (UAE) (once)
Multiple small Virtual Log Files, commonly known as VLFs, together make up an LDF file. The writing of the VLFs is sequential, and as a result the writing of the LDF file is sequential as well. This leads to another point: one does not need more than one log file in most cases. Understanding VLFs is very important, and if the number of VLFs grows enough to impact performance, it should be reduced.
Performance tuning is quite interesting, and indexes play a vital role in it. A proper index can improve performance and a bad index can hamper it. Please note that you should not create all the missing indexes the missing-index script suggests; it is just for guidance, and you should not create more than 5-10 indexes per table. Similarly, you should not drop all the unused indexes the unused-index script suggests; that too is just for guidance.
Performance tuning consultancy should really be called performance tuning practice, as there is no end to learning new things. I had earlier learned an interesting trick which I would like everybody to pay attention to. There are cases where there is so much data in the database that it is impossible to take a backup and restore it on another server to practice a few of the performance tuning tricks. If this is the scenario you face, you can just take a backup of the schema and the statistics. Once the statistics are created along with the schema, even without data in the tables, all the queries will behave as they do on the production server. This way, without access to the data, we were able to recreate the same scenario as the production server on the development server.
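
As a side note, and not part of the original post: newer builds of SQL Server (2014 SP2 and later) package this schema-plus-statistics idea into a single command, DBCC CLONEDATABASE, which creates a data-free copy of a database that keeps the schema and statistics. A minimal sketch (the database names are examples):

-- Creates a schema-and-statistics-only clone with no table data
DBCC CLONEDATABASE (AdventureWorks, AdventureWorks_Clone);

On the versions discussed in this post, the equivalent is scripting the schema together with statistics, for example via the SSMS Generate Scripts wizard with the statistics option enabled.
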

2012

My book co-author Vinod Kumar writes his perspective on interview processes: positioning yourself is close to marketing yourself in this industry. Productivity is one of the assets management always loves to hear about. Look at areas where you have made process improvements and are using the tools to the maximum (developer tools or Office productivity tools). This sells at any level, from developer productivity to operational productivity: you surely increase your chances of qualifying for the next interview cycle.
January 2012 was a quiz month and we had a great time asking interesting questions and received fantastic responses. Do you know the answers to the questions? If yes, fantastic; if not, you still have time to go over them and refresh your memory.
Reference: Pinal Dave (http://blog.sqlauthority.com)

Missing Index Script Sql server

Click to Download Scripts

Performance Tuning is quite interesting and Index plays a vital role in it. A proper index can improve the performance and a bad index can hamper the performance.
Here is the script from my script bank which I use to identify missing indexes on any database.
Please note that you should not create all the missing indexes this script suggests; it is just for guidance. You should not create more than 5-10 indexes per table. Additionally, this script sometimes does not give accurate information, so use your common sense.
Anyway, the script is a good starting point. Pay attention to Avg_Estimated_Impact when you are going to create an index. The index creation script is also provided in the last column.

Click to Download Scripts

-- Missing Index Script
-- Original Author: Pinal Dave (C) 2011
SELECT TOP 25
dm_mid.database_id
AS DatabaseID,
dm_migs.avg_user_impact*(dm_migs.user_seeks+dm_migs.user_scans) Avg_Estimated_Impact,
dm_migs.last_user_seek AS Last_User_Seek,
OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) AS [TableName],
'CREATE INDEX [IX_' + OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) + '_'
+ REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.equality_columns,''),', ','_'),'[',''),']','') +
CASE
WHEN dm_mid.equality_columns IS NOT NULL AND dm_mid.inequality_columns IS NOT NULL THEN '_'
ELSE ''
END
+ REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.inequality_columns,''),', ','_'),'[',''),']','')
+
']'
+ ' ON ' + dm_mid.statement
+ ' (' + ISNULL (dm_mid.equality_columns,'')
+
CASE WHEN dm_mid.equality_columns IS NOT NULL AND dm_mid.inequality_columns IS NOT NULL THEN ',' ELSE
'' END
+ ISNULL (dm_mid.inequality_columns, '')
+
')'
+ ISNULL (' INCLUDE (' + dm_mid.included_columns + ')', '') AS Create_Statement
FROM sys.dm_db_missing_index_groups dm_mig
INNER JOIN sys.dm_db_missing_index_group_stats dm_migs
ON dm_migs.group_handle = dm_mig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details dm_mid
ON dm_mig.index_handle = dm_mid.index_handle
WHERE dm_mid.database_ID = DB_ID()
ORDER BY Avg_Estimated_Impact DESC
GO

Unused Index Script sql server

Performance Tuning is quite interesting and Index plays a vital role in it. A proper index can improve the performance and a bad index can hamper the performance.
Here is the script from my script bank which I use to identify unused indexes on any database.
Please note that you should not drop all the unused indexes this script suggests; it is just for guidance. You should not create more than 5-10 indexes per table. Additionally, this script sometimes does not give accurate information, so use your common sense.
Anyway, the script is a good starting point. Pay attention to User Scans, User Lookups and User Updates when you are going to drop an index. The general understanding is that if these values are all high and User Seeks is low, the index needs tuning. The index drop script is also provided in the last column.

Click to Download Scripts

-- Unused Index Script
-- Original Author: Pinal Dave (C) 2011
SELECT TOP 25
o.name
AS ObjectName
, i.name AS IndexName
, i.index_id AS IndexID
, dm_ius.user_seeks AS UserSeek
, dm_ius.user_scans AS UserScans
, dm_ius.user_lookups AS UserLookups
, dm_ius.user_updates AS UserUpdates
, p.TableRows
, 'DROP INDEX ' + QUOTENAME(i.name)
+
' ON ' + QUOTENAME(s.name) + '.' + QUOTENAME(OBJECT_NAME(dm_ius.OBJECT_ID)) AS 'drop statement'
FROM sys.dm_db_index_usage_stats dm_ius
INNER JOIN sys.indexes i ON i.index_id = dm_ius.index_id AND dm_ius.OBJECT_ID = i.OBJECT_ID
INNER JOIN sys.objects o ON dm_ius.OBJECT_ID = o.OBJECT_ID
INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
INNER JOIN (SELECT SUM(p.rows) TableRows, p.index_id, p.OBJECT_ID
FROM sys.partitions p GROUP BY p.index_id, p.OBJECT_ID) p
ON p.index_id = dm_ius.index_id AND dm_ius.OBJECT_ID = p.OBJECT_ID
WHERE OBJECTPROPERTY(dm_ius.OBJECT_ID,'IsUserTable') = 1
AND dm_ius.database_id = DB_ID()
AND
i.type_desc = 'nonclustered'
AND i.is_primary_key = 0
AND i.is_unique_constraint = 0
ORDER BY (dm_ius.user_seeks + dm_ius.user_scans + dm_ius.user_lookups) ASC
GO


----------------------

That’s a great script (as yours always are), however I think you are missing another class of “missing indexes”, and that is indexes that don’t have stats at all. Rows are only written to dm_db_index_usage_stats when an index is used. If that index is totally un-used (even for writing to), it won’t appear in dm_db_index_usage_stats and therefore won’t be included in your script.
Try this on a production system, you might be surprised by how many rows it returns:


SELECT object_name(i.object_id) AS tableName, i.name AS indexName
FROM sys.indexes i
LEFT JOIN sys.dm_db_index_usage_stats s ON i.object_id = s.object_id AND i.index_id = s.index_id AND s.database_id = db_id()
WHERE objectproperty(i.object_id,'IsUserTable') = 1 AND i.index_id > 0
AND s.object_id IS NULL
AND i.is_primary_key = 0
AND i.is_unique_constraint = 0
AND i.is_unique = 0

Thursday 6 August 2015

SQL SERVER – 2008 – Find If Index is Being Used in Database

Very often I get asked how to find out whether an index is being used in a database or not. If a database has many indexes and not all of them are used, it can adversely affect performance: a higher number of indexes slows down INSERT / UPDATE / DELETE operations, even though it can help SELECT operations. It is recommended to drop any unused indexes from a table to improve performance.
Before dropping an index it is important to check whether it is being used or not. I have written a quick script which can find out whether an index is used. SQL Server 2005 and later editions have Dynamic Management Views (DMVs) which can be queried to retrieve the necessary information.
We will run a SELECT on the Employee table of the AdventureWorks database and check whether it uses indexes or not. All the information about index usage is stored in the DMV sys.dm_db_index_usage_stats. Let us run the following query first and save our results.
USE AdventureWorks
GO
SELECT DISTINCT OBJECT_NAME(sis.OBJECT_ID) TableName, si.name AS IndexName, sc.Name AS ColumnName,
sic.Index_ID, sis.user_seeks, sis.user_scans, sis.user_lookups, sis.user_updates
FROM sys.dm_db_index_usage_stats sis
INNER JOIN sys.indexes si ON sis.OBJECT_ID = si.OBJECT_ID AND sis.Index_ID = si.Index_ID
INNER JOIN sys.index_columns sic ON sis.OBJECT_ID = sic.OBJECT_ID AND sic.Index_ID = si.Index_ID
INNER JOIN sys.columns sc ON sis.OBJECT_ID = sc.OBJECT_ID AND sic.Column_ID = sc.Column_ID
WHERE sis.Database_ID = DB_ID('AdventureWorks') AND sis.OBJECT_ID = OBJECT_ID('HumanResources.Employee');
GO
Now let us run following two SELECT statement which will utilize Indexes on table Employee.
USE AdventureWorks
GO
SELECT *
FROM HumanResources.Employee WITH (INDEX = 1)
WHERE EmployeeID = 1
GO
SELECT *
FROM HumanResources.Employee WITH (INDEX = 2)
WHERE LoginID = 'adventure-works\guy1'
GO
Note: WITH (INDEX = Number) is not required, but I have used it to make sure that the first query uses index 1 and the second query uses index 2. Both queries will return the same result. Now, once again, we will run our initial query against sys.dm_db_index_usage_stats and compare the result with the initial data.
It is clear from comparing both result sets that running queries against a table updates sys.dm_db_index_usage_stats and increments the user_seeks column.
The whole process above shows that index usage is stored in sys.dm_db_index_usage_stats. This DMV stores all usage since SQL Server was last restarted; once the SQL Server service is restarted it is reset, and over time the values in the columns are updated again. If we run our initial query without the WHERE condition, we get many rows containing index names and their usage, which gives us an idea of which indexes are heavily used. If, using the WHERE condition, we do not find our index in the results, it is a clear indication that the index is not used much.
If the SQL Server service has not been restarted for a reasonable amount of time and no usage is found for an index, the index can be considered for dropping. Again, make sure you test your performance after dropping the index; if it gets worse, put the index back and continue the exercise.
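
A minimal sketch of that drop-and-restore cycle, with a hypothetical index name on the table used in the example above:

-- Drop the apparently unused index
DROP INDEX IX_Employee_LoginID ON HumanResources.Employee;

-- Re-test the workload; if performance degrades, put the index back
CREATE NONCLUSTERED INDEX IX_Employee_LoginID ON HumanResources.Employee (LoginID);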

SQL SERVER – Find Last Date Time Updated for Any Table

I just received an email from one of my regular readers who is curious to know if there is any way to find out when a table was most recently updated. I was ready with my answer! I promptly suggested that if a table contains an UpdatedDate or ModifiedDate column with a default value of GETDATE(), he should make use of it. On closer observation, the table was not required to keep a history of when rows were inserted; the sole requirement was to know when the table had last been updated. That's it!
If a user wants to find out when a table was last updated, he can query the dynamic management view (DMV) sys.dm_db_index_usage_stats and easily figure it out. Let us walk through this example by creating a table and populating it, then using the DMV to determine when it was last updated.
USE AdventureWorks
GO
CREATE TABLE Test
(ID INT,
COL VARCHAR(100))
GO
INSERT INTO Test
SELECT 1,'First'
UNION ALL
SELECT 2,'Second'
GO

Now we have created a table and populated it with data. Next, we will run the following query to find out when it was last updated.
SELECT OBJECT_NAME(OBJECT_ID) AS TableName, last_user_update,*
FROM sys.dm_db_index_usage_stats
WHERE database_id = DB_ID( 'AdventureWorks')
AND
OBJECT_ID=OBJECT_ID('test')

Running the query provides accurate details of when the table was last updated. If the WHERE condition is removed entirely, it provides details for the entire database.

Sunday 2 August 2015

Developer Checklist For Unit Test

Introduction

Unit Tests play an important role in delivering high quality software solutions. An ideal unit test is a piece of code (automated test) written by a developer which exercises a small but specific area of code functionality to ensure that it works as expected.

Why Unit Test

According to the Test Triangle, in a software project the largest number of tests should be unit tests, because many individual unit tests each exercise a small unit of functionality, and together they span the various areas of functionality offered by the software solution.


Unit Test Check-List

While writing unit tests, the following points should be considered and followed by developers and SDETs.
Self-Describing Names: Unit test method names must be self-describing and Pascal case. For example, choose Add_New_Customer_With_Valid_AccountNumber over AddCustomer_with_validAcc or addCustomer. Also focus on naming style and keep it consistent across all test methods and tests.

A3 (Arrange, Act, Assert): Make sure that all test methods are designed around Arrange, Act and Assert. If required, refactor your code to fall into these three sections.

Test Happy and Sad Paths: The unit tests should cover all possible scenarios and strive for high code coverage, ensuring good quality metrics. Unit test methods must exercise all possible use-case scenarios to test input validations, interactions, error messages, edge cases, exceptions and so on.

Make use of Attributes: Use the attributes provided by the test framework, for example:

[TestCategory("<describe if it is a Unit or Integration Test>")]
[TestCategory("<which section of the application is being tested>")]
[Priority(n), TestCategory("<define if it's a gated or non-gated build test>")]
[WorkItem(xxxx)]
[Owner("<who wrote this test>")]

Have the least number of asserts per test (if applicable): A good unit test should only have a limited number of assert statements; it should test exactly the functionality indicated by its descriptive name. A well-defined unit test should contain only one assert statement per test and must not try to exercise all validation/boundary checks with multiple Assert() calls in one test method.

Keep assert messages descriptive: Use descriptive messages to improve the readability of the code and the build log, for example:

Assert.IsTrue(customer.IsExist, "The Customer is not found in the Database");

Unit != Integration: There is a very fine line between unit and integration; if you happen to go beyond what your function is supposed to be doing, then you are not writing a unit test. That is, a unit test does not focus on interaction with external systems and software layers/dependencies. Test doubles (for example the Microsoft Fakes framework) come into the picture to write unit tests for code that has dependencies on external libraries and systems.

Follow OOP Design and Adopt DI: Following dependency injection will allow you to convert potential integration tests into small and quickly testable unit tests by taking advantage of test doubles (e.g. Microsoft Fakes, Moq, FakeItEasy and similar frameworks).

Should be thorough: Unit tests are supposed to test all the possible areas of functionality that are subject to failure due to incorrect input, validation checks, boundary checks, etc. for a given function/method.

Must be repeatable: Unit tests must be repeatable for every build and must produce the same results. Development best practice suggests that if you are working on code that impacts a unit test, you must fix the affected unit test as well and ensure that it passes.

Have to be independent: Unit tests must be independent of other tests; in other words, no collateral damage. Hence a unit test must focus only on a small aspect of big functionality. When such a unit test fails, it should be easy to discover where the issue is in the code, i.e. it can be tested in isolation.

Keep it professional: Even though at times unit tests may appear to be very simple and small, you must write them with coding practices as good as those you use for your main development code. You may want to follow refactoring, code analysis and code review practices for your test projects as well.

No collateral damage: Make sure to run all related unit tests after any dev code change, big or small, to verify that no collateral damage has been introduced.

If you break it, you bought it: If you are working on a feature, run all the unit tests to verify there is no collateral damage. If you observe that some unit tests started failing because of your code changes, then you own fixing those broken unit tests and making sure they pass again.

Track and maintain the tests: Test code changes should be tracked and maintained as an on-going effort. Continue to follow the design principles and coding guidelines.

Code review the tests: Just like any other dev code, unit tests also need to be code reviewed by a peer; regardless of the size of the test, follow the process. The code review might include reviewing the name of the test method, the scenarios covered, the conditions checked, the scope of assertions, and code/design smells.

Increase Your Website Traffic

As we all know, nowadays a website matters a lot to us; being online plays a vital role in our lives. Many of us have our own websites, but not all of those websites get enough traffic. Why? In this article we will see how we can increase traffic to our websites, how to make them show up in Google search, how we can get Google to crawl our website links, and a bit about search engine optimization. I hope you will like it.
Please see this article in my blog.
Background
There are lots of websites, and all website owners are trying to increase their traffic. This article is for those who are not experts in SEO. Here we will discuss the points which I found useful for my website http://sibeeshpassion.com/.
How To Increase Your Website Traffic
There are few points you must take care of:
Responsive
Responsive simply means working with all devices. People now use mobiles and tablets to access the web, so you cannot force users to view your website on a desktop PC; if you do, they will simply leave your site. It is important to design the website the user's way, meaning the user can visit the site on any device.
Fast and reliable
It is also important that our websites load fast. They should not make users wait for a long period or just keep on loading. You can find some tips here.
Share your website links on Social Media
We all write posts/articles/blogs etc., but that is not enough. You need to share your links on social media. You can share them on almost all the social networks wherever you have an account. The most useful social media communities are the following:
  • Facebook
  • Linkedin
  • Twitter
  • Google Plus
  • Reddit
  • Whatsapp
Be active on Social Media
It is important to be active on all the social media you use, so that people recognize you and your shares. Try to support others too by sharing their posts; it is a kind of give-and-take policy. Being active is as important as sharing your own content.
Be a member in communities
Communities can deliver high traffic to your website. I would say join an online community related to your profession and help others; it is a great source of personal satisfaction too, and you can learn a lot of things this way. Try it and you will come to know its importance.
If you are a software professional, you can join C-SharpCorner, CodeProject, or the forums at http://www.asp.net/. These are all great communities where you can help and learn.
Create your own brand
Creating your own online brand does not mean setting up an office and all that. As my site name is SibeeshPassion, I have created my own Facebook page, Twitter account, Google Plus page and so on.
Always include your brand name each time you share something. Sounds good, right?
Create mail signature with your site link
We all send emails, right? And we reply to emails too. So it is important to include our site name in our e-mail signature. My email signature is:
Kindest Regards
Sibeesh Venu
www.sibeeshpassion.com

Include Translation
As our website is accessible to the entire world, we should offer an option to translate our content into the user's language. If you are using WordPress, you can find many plugins for that. In this way you make your site more readable, and the more users are active on your website, the better things will go for you.
Active in forums
Nowadays there are many forums available; you can choose according to your passion or profession. I am active on the C-Sharp Corner, CodeProject and ASP.NET forums. And do not forget to include your website link in your forum signature too.
Include Meta Data and Meta Description
Google crawls your website according to the meta data and meta description you provide for each post. So you must set the meta data and meta description for all of your pages and posts. If you use WordPress, there is a plugin called All In One SEO; I recommend it.

Always check your site metrics
It is recommended that you check your site metrics regularly. You can use either google.com/analytics or http://www.alexa.com/; I recommend both.
Please note that there may be other good tools too; use them according to your needs. When you check your metrics, you can get suggestions about what you must do to make your website more efficient. You can find reports on which keywords and which pages are visited most often, and please do not forget to take care of the performance of those pages and posts.
And last but not least:
Avail yourself of all the available Google Services
Google is the master; by using Google services you can earn money and increase traffic, so that you can be recognized. You can find the Google services here.

Conclusion
I hope you liked this article. Now please share your valuable feedback with me; it means a lot. Thanks in advance.